/*
- * Copyright (c) 2009 Nicira Networks.
+ * Copyright (c) 2009, 2010 Nicira Networks.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "netflow.h"
#include "odp-util.h"
#include "ofp-print.h"
+#include "ofproto-sflow.h"
#include "ofpbuf.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
#define THIS_MODULE VLM_ofproto
#include "vlog.h"
-enum {
- DP_GROUP_FLOOD = 0,
- DP_GROUP_ALL = 1
-};
+#include "sflow_api.h"
enum {
TABLEID_HASH = 0,
const flow_t *flow, struct ofproto *ofproto,
const struct ofpbuf *packet,
struct odp_actions *out, tag_type *tags,
- bool *may_setup_flow);
+ bool *may_set_up_flow, uint16_t *nf_output_iface);
struct rule {
struct cls_rule cr;
uint64_t packet_count; /* Number of packets received. */
uint64_t byte_count; /* Number of bytes received. */
uint64_t accounted_bytes; /* Number of bytes passed to account_cb. */
- uint8_t tcp_flags; /* Bitwise-OR of all TCP flags seen. */
- uint8_t ip_tos; /* Last-seen IP type-of-service. */
tag_type tags; /* Tags (set only by hooks). */
+ struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
/* If 'super' is non-NULL, this rule is a subrule, that is, it is an
* exact-match rule (having cr.wc.wildcards of 0) generated from the
return false;
}
-static struct rule *rule_create(struct rule *super, const union ofp_action *,
- size_t n_actions, uint16_t idle_timeout,
- uint16_t hard_timeout);
+static struct rule *rule_create(struct ofproto *, struct rule *super,
+ const union ofp_action *, size_t n_actions,
+ uint16_t idle_timeout, uint16_t hard_timeout);
static void rule_free(struct rule *);
static void rule_destroy(struct ofproto *, struct rule *);
static struct rule *rule_from_cls_rule(const struct cls_rule *);
struct pinsched *miss_sched, *action_sched;
struct executer *executer;
struct netflow *netflow;
+ struct ofproto_sflow *sflow;
/* Flow table. */
struct classifier cls;
static void send_packet_in_miss(struct ofpbuf *, void *ofproto);
static void send_packet_in_action(struct ofpbuf *, void *ofproto);
static void update_used(struct ofproto *);
-static void update_stats(struct rule *, const struct odp_flow_stats *);
+static void update_stats(struct ofproto *, struct rule *,
+ const struct odp_flow_stats *);
static void expire_rule(struct cls_rule *, void *ofproto);
+static void active_timeout(struct ofproto *ofproto, struct rule *rule);
static bool revalidate_rule(struct ofproto *p, struct rule *rule);
static void revalidate_cb(struct cls_rule *rule_, void *p_);
static void handle_openflow(struct ofconn *, struct ofproto *,
struct ofpbuf *);
-static void refresh_port_group(struct ofproto *, unsigned int group);
+static void refresh_port_groups(struct ofproto *);
+
static void update_port(struct ofproto *, const char *devname);
static int init_ports(struct ofproto *);
static void reinit_ports(struct ofproto *);
dpif_close(dpif);
return error;
}
- error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION);
+ error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW);
if (error) {
VLOG_ERR("failed to listen on datapath %s: %s",
datapath, strerror(error));
p->miss_sched = p->action_sched = NULL;
p->executer = NULL;
p->netflow = NULL;
+ p->sflow = NULL;
/* Initialize flow table. */
classifier_init(&p->cls);
}
int
-ofproto_set_netflow(struct ofproto *ofproto, const struct svec *collectors,
- uint8_t engine_type, uint8_t engine_id, bool add_id_to_iface)
+ofproto_set_netflow(struct ofproto *ofproto,
+ const struct netflow_options *nf_options)
{
- if (collectors && collectors->n) {
+ if (nf_options->collectors.n) {
if (!ofproto->netflow) {
ofproto->netflow = netflow_create();
}
- netflow_set_engine(ofproto->netflow, engine_type, engine_id,
- add_id_to_iface);
- return netflow_set_collectors(ofproto->netflow, collectors);
+ return netflow_set_options(ofproto->netflow, nf_options);
} else {
netflow_destroy(ofproto->netflow);
ofproto->netflow = NULL;
}
}
+/* Configures sFlow sampling on 'ofproto' from 'oso', or disables sFlow
+ * entirely (destroying all sampler state) if 'oso' is a null pointer. */
+void
+ofproto_set_sflow(struct ofproto *ofproto,
+ const struct ofproto_sflow_options *oso)
+{
+ struct ofproto_sflow *os = ofproto->sflow;
+ if (oso) {
+ if (!os) {
+ struct ofport *ofport;
+ unsigned int odp_port;
+
+ /* First-time enable: create the sampler, refresh the port groups
+ * (which pushes the flood/all group sizes to the new sampler),
+ * then register every existing datapath port with it. */
+ os = ofproto->sflow = ofproto_sflow_create(ofproto->dpif);
+ refresh_port_groups(ofproto);
+ PORT_ARRAY_FOR_EACH (ofport, &ofproto->ports, odp_port) {
+ ofproto_sflow_add_port(os, odp_port,
+ netdev_get_name(ofport->netdev));
+ }
+ }
+ ofproto_sflow_set_options(os, oso);
+ } else {
+ ofproto_sflow_destroy(os);
+ ofproto->sflow = NULL;
+ }
+}
+
void
ofproto_set_failure(struct ofproto *ofproto, bool fail_open)
{
}
int
-ofproto_set_stp(struct ofproto *ofproto UNUSED, bool enable_stp)
+ofproto_set_stp(struct ofproto *ofproto OVS_UNUSED, bool enable_stp)
{
/* XXX */
if (enable_stp) {
pinsched_destroy(p->action_sched);
executer_destroy(p->executer);
netflow_destroy(p->netflow);
+ ofproto_sflow_destroy(p->sflow);
switch_status_unregister(p->ss_cat);
}
}
}
- if (p->fail_open) {
- fail_open_run(p->fail_open);
- }
pinsched_run(p->miss_sched, send_packet_in_miss, p);
pinsched_run(p->action_sched, send_packet_in_action, p);
if (p->executer) {
ofconn_run(ofconn, p);
}
+ /* Fail-open maintenance. Do this after processing the ofconns since
+ * fail-open checks the status of the controller rconn. */
+ if (p->fail_open) {
+ fail_open_run(p->fail_open);
+ }
+
for (i = 0; i < p->n_listeners; i++) {
struct vconn *vconn;
int retval;
if (p->netflow) {
netflow_run(p->netflow);
}
+ if (p->sflow) {
+ ofproto_sflow_run(p->sflow);
+ }
return 0;
}
if (p->executer) {
executer_wait(p->executer);
}
+ if (p->sflow) {
+ ofproto_sflow_wait(p->sflow);
+ }
if (!tag_set_is_empty(&p->revalidate_set)) {
poll_immediate_wake();
}
int error;
error = xlate_actions(actions, n_actions, flow, p, packet, &odp_actions,
- NULL, NULL);
+ NULL, NULL, NULL);
if (error) {
return error;
}
int idle_timeout)
{
struct rule *rule;
- rule = rule_create(NULL, actions, n_actions,
+ rule = rule_create(p, NULL, actions, n_actions,
idle_timeout >= 0 ? idle_timeout : 5 /* XXX */, 0);
cls_rule_from_flow(&rule->cr, flow, wildcards, priority);
rule_insert(p, rule, NULL, 0);
svec_destroy(&devnames);
}
-static void
+static size_t
refresh_port_group(struct ofproto *p, unsigned int group)
{
uint16_t *ports;
}
dpif_port_group_set(p->dpif, group, ports, n_ports);
free(ports);
+
+ return n_ports;
}
+/* Rebuilds both datapath port groups (flood and all) and, when sFlow is
+ * enabled, reports the resulting group sizes to the sampler. */
static void
refresh_port_groups(struct ofproto *p)
{
- refresh_port_group(p, DP_GROUP_FLOOD);
- refresh_port_group(p, DP_GROUP_ALL);
+ size_t n_flood = refresh_port_group(p, DP_GROUP_FLOOD);
+ size_t n_all = refresh_port_group(p, DP_GROUP_ALL);
+ if (p->sflow) {
+ ofproto_sflow_set_group_sizes(p->sflow, n_flood, n_all);
+ }
}
static struct ofport *
+/* Adds 'ofport' to 'p': starts netdev monitoring, indexes the port by
+ * datapath port number and by netdev name, and registers it with sFlow
+ * when sampling is enabled. */
static void
ofport_install(struct ofproto *p, struct ofport *ofport)
{
+ uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no);
+ const char *netdev_name = (const char *) ofport->opp.name;
+
netdev_monitor_add(p->netdev_monitor, ofport->netdev);
- port_array_set(&p->ports, ofp_port_to_odp_port(ofport->opp.port_no),
- ofport);
- shash_add(&p->port_by_name, (char *) ofport->opp.name, ofport);
+ port_array_set(&p->ports, odp_port, ofport);
+ shash_add(&p->port_by_name, netdev_name, ofport);
+ if (p->sflow) {
+ ofproto_sflow_add_port(p->sflow, odp_port, netdev_name);
+ }
}
+/* Removes 'ofport' from 'p': stops netdev monitoring, clears the port-array
+ * and by-name index entries, and unregisters the port from sFlow when
+ * sampling is enabled. */
static void
ofport_remove(struct ofproto *p, struct ofport *ofport)
{
+ uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no);
+
netdev_monitor_remove(p->netdev_monitor, ofport->netdev);
- port_array_set(&p->ports, ofp_port_to_odp_port(ofport->opp.port_no), NULL);
+ port_array_set(&p->ports, odp_port, NULL);
shash_delete(&p->port_by_name,
shash_find(&p->port_by_name, (char *) ofport->opp.name));
+ if (p->sflow) {
+ ofproto_sflow_del_port(p->sflow, odp_port);
+ }
}
static void
if (!of_msg) {
break;
}
+ if (p->fail_open) {
+ fail_open_maybe_recover(p->fail_open);
+ }
handle_openflow(ofconn, p, of_msg);
ofpbuf_delete(of_msg);
}
/* Caller is responsible for initializing the 'cr' member of the returned
* rule. */
static struct rule *
-rule_create(struct rule *super,
+rule_create(struct ofproto *ofproto, struct rule *super,
const union ofp_action *actions, size_t n_actions,
uint16_t idle_timeout, uint16_t hard_timeout)
{
}
rule->n_actions = n_actions;
rule->actions = xmemdup(actions, n_actions * sizeof *actions);
+ netflow_flow_clear(&rule->nf_flow);
+ netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);
+
return rule;
}
if (rule->cr.wc.wildcards || !flow_equal(flow, &rule->cr.flow)) {
struct rule *super = rule->super ? rule->super : rule;
if (xlate_actions(super->actions, super->n_actions, flow, ofproto,
- packet, &a, NULL, 0)) {
+ packet, &a, NULL, 0, NULL)) {
return;
}
actions = a.actions;
actions, n_actions, packet)) {
struct odp_flow_stats stats;
flow_extract_stats(flow, packet, &stats);
- update_stats(rule, &stats);
+ update_stats(ofproto, rule, &stats);
rule->used = time_msec();
+ netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->used);
}
}
rule_create_subrule(struct ofproto *ofproto, struct rule *rule,
const flow_t *flow)
{
- struct rule *subrule = rule_create(rule, NULL, 0,
+ struct rule *subrule = rule_create(ofproto, rule, NULL, 0,
rule->idle_timeout, rule->hard_timeout);
COVERAGE_INC(ofproto_subrule_create);
cls_rule_from_flow(&subrule->cr, flow, 0,
super = rule->super ? rule->super : rule;
rule->tags = 0;
xlate_actions(super->actions, super->n_actions, &rule->cr.flow, p,
- packet, &a, &rule->tags, &rule->may_install);
+ packet, &a, &rule->tags, &rule->may_install,
+ &rule->nf_flow.output_iface);
actions_len = a.n_actions * sizeof *a.actions;
if (rule->n_odp_actions != a.n_actions
&put)) {
rule->installed = true;
if (displaced_rule) {
- update_stats(rule, &put.flow.stats);
+ update_stats(p, displaced_rule, &put.flow.stats);
rule_post_uninstall(p, displaced_rule);
}
}
static void
rule_update_actions(struct ofproto *ofproto, struct rule *rule)
{
- bool actions_changed = rule_make_actions(ofproto, rule, NULL);
+ bool actions_changed;
+ uint16_t new_out_iface, old_out_iface;
+
+ old_out_iface = rule->nf_flow.output_iface;
+ actions_changed = rule_make_actions(ofproto, rule, NULL);
+
if (rule->may_install) {
if (rule->installed) {
if (actions_changed) {
- /* XXX should really do rule_post_uninstall() for the *old* set
- * of actions, and distinguish the old stats from the new. */
struct odp_flow_put put;
- do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY, &put);
+ do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY
+ | ODPPF_ZERO_STATS, &put);
+ update_stats(ofproto, rule, &put.flow.stats);
+
+ /* Temporarily set the old output iface so that NetFlow
+ * messages have the correct output interface for the old
+ * stats. */
+ new_out_iface = rule->nf_flow.output_iface;
+ rule->nf_flow.output_iface = old_out_iface;
+ rule_post_uninstall(ofproto, rule);
+ rule->nf_flow.output_iface = new_out_iface;
}
} else {
rule_install(ofproto, rule, NULL);
odp_flow.actions = NULL;
odp_flow.n_actions = 0;
if (!dpif_flow_del(p->dpif, &odp_flow)) {
- update_stats(rule, &odp_flow.stats);
+ update_stats(p, rule, &odp_flow.stats);
}
rule->installed = false;
}
}
+/* Returns true if 'rule' is a subrule whose super-rule has exactly one
+ * action, an output to OFPP_CONTROLLER; returns false otherwise (including
+ * for a null 'rule' or a rule with no super-rule). */
+static bool
+is_controller_rule(struct rule *rule)
+{
+ /* If the only action is send to the controller then don't report
+ * NetFlow expiration messages since it is just part of the control
+ * logic for the network and not real traffic. */
+
+ if (rule && rule->super) {
+ struct rule *super = rule->super;
+
+ return super->n_actions == 1 &&
+ super->actions[0].type == htons(OFPAT_OUTPUT) &&
+ super->actions[0].output.port == htons(OFPP_CONTROLLER);
+ }
+
+ return false;
+}
+
+/* Bookkeeping after 'rule' has been removed from the datapath: runs final
+ * accounting, emits a NetFlow expiration record (suppressed for flows whose
+ * only action is output to the controller), then rolls a subrule's counters
+ * up into its super-rule and clears the subrule's counters and NetFlow
+ * state. */
static void
rule_post_uninstall(struct ofproto *ofproto, struct rule *rule)
{
struct rule *super = rule->super;
rule_account(ofproto, rule, 0);
- if (ofproto->netflow && rule->byte_count) {
+
+ if (ofproto->netflow && !is_controller_rule(rule)) {
struct ofexpired expired;
expired.flow = rule->cr.flow;
expired.packet_count = rule->packet_count;
expired.byte_count = rule->byte_count;
expired.used = rule->used;
- expired.created = rule->created;
- expired.tcp_flags = rule->tcp_flags;
- expired.ip_tos = rule->ip_tos;
- netflow_expire(ofproto->netflow, &expired);
+ netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
}
if (super) {
super->packet_count += rule->packet_count;
super->byte_count += rule->byte_count;
- super->tcp_flags |= rule->tcp_flags;
- if (rule->packet_count) {
- super->ip_tos = rule->ip_tos;
- }
- }
- /* Reset counters to prevent double counting if the rule ever gets
- * reinstalled. */
- rule->packet_count = 0;
- rule->byte_count = 0;
- rule->accounted_bytes = 0;
- rule->tcp_flags = 0;
- rule->ip_tos = 0;
+ /* Reset counters to prevent double counting if the rule ever gets
+ * reinstalled. */
+ rule->packet_count = 0;
+ rule->byte_count = 0;
+ rule->accounted_bytes = 0;
+
+ /* NOTE(review): the counter reset now happens only when 'super' is
+ * non-null (i.e. for subrules), where before it was unconditional —
+ * confirm that top-level rules are never reinstalled with stale
+ * counts. */
+ netflow_flow_clear(&rule->nf_flow);
+ }
}
\f
static void
}
+/* Appends an output-to-group datapath action for 'group' to 'actions'.
+ * For the flood/all groups, also records NF_OUT_FLOOD in
+ * '*nf_output_iface' for NetFlow reporting. */
static void
-add_output_group_action(struct odp_actions *actions, uint16_t group)
+add_output_group_action(struct odp_actions *actions, uint16_t group,
+ uint16_t *nf_output_iface)
{
odp_actions_add(actions, ODPAT_OUTPUT_GROUP)->output_group.group = group;
+
+ if (group == DP_GROUP_ALL || group == DP_GROUP_FLOOD) {
+ *nf_output_iface = NF_OUT_FLOOD;
+ }
}
static void
/* Output. */
struct odp_actions *out; /* Datapath actions. */
tag_type *tags; /* Tags associated with OFPP_NORMAL actions. */
- bool may_setup_flow; /* True ordinarily; false if the actions must
+ bool may_set_up_flow; /* True ordinarily; false if the actions must
* be reassessed for every packet. */
+ uint16_t nf_output_iface; /* Output interface index for NetFlow. */
};
static void do_xlate_actions(const union ofp_action *in, size_t n_in,
add_output_action(struct action_xlate_ctx *ctx, uint16_t port)
{
const struct ofport *ofport = port_array_get(&ctx->ofproto->ports, port);
- if (!ofport || !(ofport->opp.config & OFPPC_NO_FWD)) {
- odp_actions_add(ctx->out, ODPAT_OUTPUT)->output.port = port;
+
+ if (ofport) {
+ if (ofport->opp.config & OFPPC_NO_FWD) {
+ /* Forwarding disabled on port. */
+ return;
+ }
+ } else {
+ /*
+ * We don't have an ofport record for this port, but it doesn't hurt to
+ * allow forwarding to it anyhow. Maybe such a port will appear later
+ * and we're pre-populating the flow table.
+ */
}
+
+ odp_actions_add(ctx->out, ODPAT_OUTPUT)->output.port = port;
+ ctx->nf_output_iface = port;
}
static struct rule *
const struct ofp_action_output *oao)
{
uint16_t odp_port;
+ uint16_t prev_nf_output_iface = ctx->nf_output_iface;
+
+ ctx->nf_output_iface = NF_OUT_DROP;
switch (ntohs(oao->port)) {
case OFPP_IN_PORT:
case OFPP_NORMAL:
if (!ctx->ofproto->ofhooks->normal_cb(ctx->flow, ctx->packet,
ctx->out, ctx->tags,
+ &ctx->nf_output_iface,
ctx->ofproto->aux)) {
COVERAGE_INC(ofproto_uninstallable);
- ctx->may_setup_flow = false;
+ ctx->may_set_up_flow = false;
}
break;
case OFPP_FLOOD:
- add_output_group_action(ctx->out, DP_GROUP_FLOOD);
+ add_output_group_action(ctx->out, DP_GROUP_FLOOD,
+ &ctx->nf_output_iface);
break;
case OFPP_ALL:
- add_output_group_action(ctx->out, DP_GROUP_ALL);
+ add_output_group_action(ctx->out, DP_GROUP_ALL, &ctx->nf_output_iface);
break;
case OFPP_CONTROLLER:
add_controller_action(ctx->out, oao);
}
break;
}
+
+ if (prev_nf_output_iface == NF_OUT_FLOOD) {
+ ctx->nf_output_iface = NF_OUT_FLOOD;
+ } else if (ctx->nf_output_iface == NF_OUT_DROP) {
+ ctx->nf_output_iface = prev_nf_output_iface;
+ } else if (prev_nf_output_iface != NF_OUT_DROP &&
+ ctx->nf_output_iface != NF_OUT_FLOOD) {
+ ctx->nf_output_iface = NF_OUT_MULTI;
+ }
}
static void
oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
break;
+ case OFPAT_SET_NW_DST:
+ oa = odp_actions_add(ctx->out, ODPAT_SET_NW_DST);
+ oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
+ break;
+
case OFPAT_SET_TP_SRC:
oa = odp_actions_add(ctx->out, ODPAT_SET_TP_SRC);
oa->tp_port.tp_port = ia->tp_port.tp_port;
break;
+ case OFPAT_SET_TP_DST:
+ oa = odp_actions_add(ctx->out, ODPAT_SET_TP_DST);
+ oa->tp_port.tp_port = ia->tp_port.tp_port;
+ break;
+
case OFPAT_VENDOR:
xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
break;
xlate_actions(const union ofp_action *in, size_t n_in,
const flow_t *flow, struct ofproto *ofproto,
const struct ofpbuf *packet,
- struct odp_actions *out, tag_type *tags, bool *may_setup_flow)
+ struct odp_actions *out, tag_type *tags, bool *may_set_up_flow,
+ uint16_t *nf_output_iface)
{
tag_type no_tags = 0;
struct action_xlate_ctx ctx;
ctx.packet = packet;
ctx.out = out;
ctx.tags = tags ? tags : &no_tags;
- ctx.may_setup_flow = true;
+ ctx.may_set_up_flow = true;
+ ctx.nf_output_iface = NF_OUT_DROP;
do_xlate_actions(in, n_in, &ctx);
- /* Check with in-band control to see if we're allowed to setup this
+ /* Check with in-band control to see if we're allowed to set up this
* flow. */
if (!in_band_rule_check(ofproto->in_band, flow, out)) {
- ctx.may_setup_flow = false;
+ ctx.may_set_up_flow = false;
}
- if (may_setup_flow) {
- *may_setup_flow = ctx.may_setup_flow;
+ if (may_set_up_flow) {
+ *may_set_up_flow = ctx.may_set_up_flow;
+ }
+ if (nf_output_iface) {
+ *nf_output_iface = ctx.nf_output_iface;
}
if (odp_actions_overflow(out)) {
odp_actions_init(out);
if (opo->buffer_id != htonl(UINT32_MAX)) {
error = pktbuf_retrieve(ofconn->pktbuf, ntohl(opo->buffer_id),
&buffer, &in_port);
- if (error) {
+ if (error || !buffer) {
return error;
}
payload = *buffer;
flow_extract(&payload, ofp_port_to_odp_port(ntohs(opo->in_port)), &flow);
error = xlate_actions((const union ofp_action *) opo->actions, n_actions,
- &flow, p, &payload, &actions, NULL, NULL);
+ &flow, p, &payload, &actions, NULL, NULL, NULL);
if (error) {
return error;
}
#undef REVALIDATE_BITS
if (mask & OFPPC_NO_FLOOD) {
port->opp.config ^= OFPPC_NO_FLOOD;
- refresh_port_group(p, DP_GROUP_FLOOD);
+ refresh_port_groups(p);
}
if (mask & OFPPC_NO_PACKET_IN) {
port->opp.config ^= OFPPC_NO_PACKET_IN;
struct odp_flow *odp_flows;
size_t n_odp_flows;
+ packet_count = rule->packet_count;
+ byte_count = rule->byte_count;
+
n_odp_flows = rule->cr.wc.wildcards ? list_size(&rule->list) : 1;
odp_flows = xcalloc(1, n_odp_flows * sizeof *odp_flows);
if (rule->cr.wc.wildcards) {
size_t i = 0;
LIST_FOR_EACH (subrule, struct rule, list, &rule->list) {
odp_flows[i++].key = subrule->cr.flow;
+ packet_count += subrule->packet_count;
+ byte_count += subrule->byte_count;
}
} else {
odp_flows[0].key = rule->cr.flow;
}
+/* Advances 'rule''s last-used time from 'stats' (when 'stats' reports a
+ * more recent use), propagates it to the super-rule, and refreshes the
+ * flow's NetFlow time tracking. */
static void
-update_time(struct rule *rule, const struct odp_flow_stats *stats)
+update_time(struct ofproto *ofproto, struct rule *rule,
+ const struct odp_flow_stats *stats)
{
long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
if (used > rule->used) {
rule->used = used;
+ if (rule->super && used > rule->super->used) {
+ rule->super->used = used;
+ }
+ netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
}
}
+/* Folds datapath flow 'stats' into 'rule': updates timestamps, packet and
+ * byte counters, and NetFlow ip_tos/tcp_flags tracking. Stats reporting
+ * zero packets are ignored entirely. */
static void
-update_stats(struct rule *rule, const struct odp_flow_stats *stats)
+update_stats(struct ofproto *ofproto, struct rule *rule,
+ const struct odp_flow_stats *stats)
{
- update_time(rule, stats);
- rule->packet_count += stats->n_packets;
- rule->byte_count += stats->n_bytes;
- rule->tcp_flags |= stats->tcp_flags;
if (stats->n_packets) {
- rule->ip_tos = stats->ip_tos;
+ update_time(ofproto, rule, stats);
+ rule->packet_count += stats->n_packets;
+ rule->byte_count += stats->n_bytes;
+ netflow_flow_update_flags(&rule->nf_flow, stats->ip_tos,
+ stats->tcp_flags);
}
}
uint16_t in_port;
int error;
- rule = rule_create(NULL, (const union ofp_action *) ofm->actions,
+ rule = rule_create(p, NULL, (const union ofp_action *) ofm->actions,
n_actions, ntohs(ofm->idle_timeout),
ntohs(ofm->hard_timeout));
cls_rule_from_match(&rule->cr, &ofm->match, ntohs(ofm->priority));
{
size_t msg_len = ntohs(ofmph->header.header.length);
if (msg_len < sizeof(*ofmph)) {
- VLOG_WARN_RL(&rl, "dropping short managment message: %d\n", msg_len);
+ VLOG_WARN_RL(&rl, "dropping short managment message: %zu\n", msg_len);
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
}
struct ofmp_capability_request *ofmpcr;
if (msg_len < sizeof(struct ofmp_capability_request)) {
- VLOG_WARN_RL(&rl, "dropping short capability request: %d\n",
+ VLOG_WARN_RL(&rl, "dropping short capability request: %zu\n",
msg_len);
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH);
}
}
\f
static void
-handle_odp_msg(struct ofproto *p, struct ofpbuf *packet)
+handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet)
{
struct odp_msg *msg = packet->data;
uint16_t in_port = odp_port_to_ofp_port(msg->port);
struct ofpbuf payload;
flow_t flow;
- /* Handle controller actions. */
- if (msg->type == _ODPL_ACTION_NR) {
- COVERAGE_INC(ofproto_ctlr_action);
- pinsched_send(p->action_sched, in_port, packet,
- send_packet_in_action, p);
- return;
- }
-
payload.data = msg + 1;
payload.size = msg->length - sizeof *msg;
flow_extract(&payload, msg->port, &flow);
rule_execute(p, rule, &payload, &flow);
rule_reinstall(p, rule);
- ofpbuf_delete(packet);
+
+ if (rule->super && rule->super->cr.priority == FAIL_OPEN_PRIORITY
+ && rconn_is_connected(p->controller->rconn)) {
+ /*
+ * Extra-special case for fail-open mode.
+ *
+ * We are in fail-open mode and the packet matched the fail-open rule,
+ * but we are connected to a controller too. We should send the packet
+ * up to the controller in the hope that it will try to set up a flow
+ * and thereby allow us to exit fail-open.
+ *
+ * See the top-level comment in fail-open.c for more information.
+ */
+ pinsched_send(p->miss_sched, in_port, packet, send_packet_in_miss, p);
+ } else {
+ ofpbuf_delete(packet);
+ }
+}
+
+/* Dispatches one message received from the datapath: controller-directed
+ * packets go through the action pinscheduler, sFlow samples to the sampler
+ * (and are freed), and flow-table misses to handle_odp_miss_msg(), which
+ * takes ownership of 'packet'. */
+static void
+handle_odp_msg(struct ofproto *p, struct ofpbuf *packet)
+{
+ struct odp_msg *msg = packet->data;
+
+ switch (msg->type) {
+ case _ODPL_ACTION_NR:
+ COVERAGE_INC(ofproto_ctlr_action);
+ pinsched_send(p->action_sched, odp_port_to_ofp_port(msg->port), packet,
+ send_packet_in_action, p);
+ break;
+
+ case _ODPL_SFLOW_NR:
+ if (p->sflow) {
+ ofproto_sflow_received(p->sflow, msg);
+ }
+ ofpbuf_delete(packet);
+ break;
+
+ case _ODPL_MISS_NR:
+ handle_odp_miss_msg(p, packet);
+ break;
+
+ default:
+ /* NOTE(review): 'packet' does not appear to be freed on this path —
+ * confirm whether an ofpbuf_delete(packet) is missing here. */
+ VLOG_WARN_RL(&rl, "received ODP message of unexpected type %"PRIu32,
+ msg->type);
+ break;
+ }
+}
\f
static void
flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &ofe->match);
ofe->priority = htons(rule->cr.priority);
ofe->reason = reason;
- ofe->duration = (now - rule->created) / 1000;
- ofe->packet_count = rule->packet_count;
- ofe->byte_count = rule->byte_count;
+ ofe->duration = htonl((now - rule->created) / 1000);
+ ofe->packet_count = htonll(rule->packet_count);
+ ofe->byte_count = htonll(rule->byte_count);
return buf;
}
LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
if (ofconn->send_flow_exp && rconn_is_connected(ofconn->rconn)) {
if (prev) {
- queue_tx(ofpbuf_clone(buf), prev, ofconn->reply_counter);
+ queue_tx(ofpbuf_clone(buf), prev, prev->reply_counter);
} else {
buf = compose_flow_exp(rule, now, reason);
}
}
}
if (prev) {
- queue_tx(buf, prev, ofconn->reply_counter);
+ queue_tx(buf, prev, prev->reply_counter);
}
}
? rule->used + rule->idle_timeout * 1000
: LLONG_MAX);
expire = MIN(hard_expire, idle_expire);
- if (expire == LLONG_MAX) {
- if (rule->installed && time_msec() >= rule->used + 5000) {
- uninstall_idle_flow(p, rule);
- }
- return;
- }
now = time_msec();
if (now < expire) {
if (rule->installed && now >= rule->used + 5000) {
uninstall_idle_flow(p, rule);
+ } else if (!rule->cr.wc.wildcards) {
+ active_timeout(p, rule);
}
+
return;
}
COVERAGE_INC(ofproto_expired);
+
+ /* Update stats. This code will be a no-op if the rule expired
+ * due to an idle timeout. */
if (rule->cr.wc.wildcards) {
- /* Update stats. (This code will be a no-op if the rule expired
- * due to an idle timeout, because in that case the rule has no
- * subrules left.) */
struct rule *subrule, *next;
LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
rule_remove(p, subrule);
}
+ } else {
+ rule_uninstall(p, rule);
}
- send_flow_exp(p, rule, now,
- (now >= hard_expire
- ? OFPER_HARD_TIMEOUT : OFPER_IDLE_TIMEOUT));
+ if (!rule_is_hidden(rule)) {
+ send_flow_exp(p, rule, now,
+ (now >= hard_expire
+ ? OFPER_HARD_TIMEOUT : OFPER_IDLE_TIMEOUT));
+ }
rule_remove(p, rule);
}
+/* If NetFlow is enabled and 'rule''s active timeout has expired, emits a
+ * NetFlow record for the flow's accumulated stats (pulling fresh stats from
+ * the datapath when the flow is installed) without removing the flow, then
+ * schedules an immediate wakeup so the accumulated records get sent.
+ * Controller-only flows are excluded (see is_controller_rule()). */
+static void
+active_timeout(struct ofproto *ofproto, struct rule *rule)
+{
+ if (ofproto->netflow && !is_controller_rule(rule) &&
+ netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) {
+ struct ofexpired expired;
+ struct odp_flow odp_flow;
+
+ /* Get updated flow stats. */
+ memset(&odp_flow, 0, sizeof odp_flow);
+ if (rule->installed) {
+ odp_flow.key = rule->cr.flow;
+ odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
+ dpif_flow_get(ofproto->dpif, &odp_flow);
+
+ if (odp_flow.stats.n_packets) {
+ update_time(ofproto, rule, &odp_flow.stats);
+ netflow_flow_update_flags(&rule->nf_flow, odp_flow.stats.ip_tos,
+ odp_flow.stats.tcp_flags);
+ }
+ }
+
+ expired.flow = rule->cr.flow;
+ expired.packet_count = rule->packet_count +
+ odp_flow.stats.n_packets;
+ expired.byte_count = rule->byte_count + odp_flow.stats.n_bytes;
+ expired.used = rule->used;
+
+ netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
+
+ /* Schedule us to send the accumulated records once we have
+ * collected all of them. */
+ poll_immediate_wake();
+ }
+}
+
static void
update_used(struct ofproto *p)
{
continue;
}
- update_time(rule, &f->stats);
+ update_time(p, rule, &f->stats);
rule_account(p, rule, f->stats.n_bytes);
}
free(flows);
do_send_packet_in(struct ofconn *ofconn, uint32_t buffer_id,
const struct ofpbuf *packet, int send_len)
{
- struct ofp_packet_in *opi;
- struct ofpbuf payload, *buf;
- struct odp_msg *msg;
+ struct odp_msg *msg = packet->data;
+ struct ofpbuf payload;
+ struct ofpbuf *opi;
+ uint8_t reason;
- msg = packet->data;
+ /* Extract packet payload from 'msg'. */
payload.data = msg + 1;
payload.size = msg->length - sizeof *msg;
- send_len = MIN(send_len, payload.size);
- buf = ofpbuf_new(sizeof *opi + send_len);
- opi = put_openflow_xid(offsetof(struct ofp_packet_in, data),
- OFPT_PACKET_IN, 0, buf);
- opi->buffer_id = htonl(buffer_id);
- opi->total_len = htons(payload.size);
- opi->in_port = htons(odp_port_to_ofp_port(msg->port));
- opi->reason = msg->type == _ODPL_ACTION_NR ? OFPR_ACTION : OFPR_NO_MATCH;
- ofpbuf_put(buf, payload.data, MIN(send_len, payload.size));
- update_openflow_length(buf);
- rconn_send_with_limit(ofconn->rconn, buf, ofconn->packet_in_counter, 100);
+ /* Construct ofp_packet_in message. */
+ reason = msg->type == _ODPL_ACTION_NR ? OFPR_ACTION : OFPR_NO_MATCH;
+ opi = make_packet_in(buffer_id, odp_port_to_ofp_port(msg->port), reason,
+ &payload, send_len);
+
+ /* Send. */
+ rconn_send_with_limit(ofconn->rconn, opi, ofconn->packet_in_counter, 100);
}
static void
send_packet_in_miss(struct ofpbuf *packet, void *p_)
{
struct ofproto *p = p_;
+ bool in_fail_open = p->fail_open && fail_open_is_active(p->fail_open);
struct ofconn *ofconn;
struct ofpbuf payload;
struct odp_msg *msg;
payload.size = msg->length - sizeof *msg;
LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
if (ofconn->miss_send_len) {
- uint32_t buffer_id = pktbuf_save(ofconn->pktbuf, &payload,
- msg->port);
+ struct pktbuf *pb = ofconn->pktbuf;
+ uint32_t buffer_id = (in_fail_open
+ ? pktbuf_get_null()
+ : pktbuf_save(pb, &payload, msg->port));
int send_len = (buffer_id != UINT32_MAX ? ofconn->miss_send_len
: UINT32_MAX);
do_send_packet_in(ofconn, buffer_id, packet, send_len);
static bool
default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
struct odp_actions *actions, tag_type *tags,
- void *ofproto_)
+ uint16_t *nf_output_iface, void *ofproto_)
{
struct ofproto *ofproto = ofproto_;
int out_port;
/* Determine output port. */
out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags);
if (out_port < 0) {
- add_output_group_action(actions, DP_GROUP_FLOOD);
+ add_output_group_action(actions, DP_GROUP_FLOOD, nf_output_iface);
} else if (out_port != flow->in_port) {
odp_actions_add(actions, ODPAT_OUTPUT)->output.port = out_port;
+ *nf_output_iface = out_port;
} else {
/* Drop. */
}