#include "netlink.h"
#include "nx-match.h"
#include "odp-util.h"
+#include "odp-execute.h"
#include "ofp-util.h"
#include "ofpbuf.h"
#include "ofp-actions.h"
static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes);
static void rule_credit_stats(struct rule_dpif *,
const struct dpif_flow_stats *);
-static void flow_push_stats(struct facet *, const struct dpif_flow_stats *);
static tag_type rule_calculate_tag(const struct flow *,
const struct minimask *, uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);
uint64_t dp_byte_count; /* Last known byte count in the datapath. */
enum subfacet_path path; /* Installed in datapath? */
-
- /* Datapath port the packet arrived on. This is needed to remove
- * flows for ports that are no longer part of the bridge. Since the
- * flow definition only has the OpenFlow port number and the port is
- * no longer part of the bridge, we can't determine the datapath port
- * number needed to delete the flow from the datapath. */
- uint32_t odp_in_port;
};
#define SUBFACET_DESTROY_MAX_BATCH 50
struct subfacet **, int n);
static void subfacet_reset_dp_stats(struct subfacet *,
struct dpif_flow_stats *);
-static void subfacet_update_time(struct subfacet *, long long int used);
static void subfacet_update_stats(struct subfacet *,
const struct dpif_flow_stats *);
static int subfacet_install(struct subfacet *,
static void facet_flush_stats(struct facet *);
-static void facet_update_time(struct facet *, long long int used);
static void facet_reset_counters(struct facet *);
-static void facet_push_stats(struct facet *);
+static void facet_push_stats(struct facet *, bool may_learn);
static void facet_learn(struct facet *);
static void facet_account(struct facet *);
static void push_all_stats(void);
int vid;
};
-static uint32_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
- uint32_t realdev, ovs_be16 vlan_tci);
+static uint16_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
+ uint16_t realdev_ofp_port,
+ ovs_be16 vlan_tci);
static bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *);
static void vsp_remove(struct ofport_dpif *);
static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);
struct tag_set revalidate_set; /* Revalidate only matching facets. */
struct hmap drop_keys; /* Set of dropped odp keys. */
+ bool recv_set_enable; /* Enables or disables receiving packets. */
};
/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
static struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *, uint32_t odp_port);
-static void dpif_stats_update_hit_count(struct ofproto_dpif *ofproto,
- uint64_t delta);
struct avg_subfacet_rates {
double add_rate; /* Moving average of new flows created per minute. */
double del_rate; /* Moving average of flows deleted per minute. */
/* Special OpenFlow rules. */
struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
+ struct rule_dpif *drop_frags_rule; /* Used in OFPC_FRAG_DROP mode. */
/* Bridging. */
struct netflow *netflow;
const struct ofproto_dpif *);
static double avg_subfacet_count(const struct ofproto_dpif *ofproto);
static void update_moving_averages(struct ofproto_dpif *ofproto);
-static void dpif_stats_update_hit_count(struct ofproto_dpif *ofproto,
- uint64_t delta);
static void update_max_subfacet_count(struct ofproto_dpif *ofproto);
/* Defer flow mod completion until "ovs-appctl ofproto/unclog"? (Useful only
push_all_stats();
}
+ /* If vswitchd started with other_config:flow_restore_wait set to "true",
+ * and the configuration has now changed to "false", enable receiving
+ * packets from the datapath. */
+ if (!backer->recv_set_enable && !ofproto_get_flow_restore_wait()) {
+ backer->recv_set_enable = true;
+
+ error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
+ if (error) {
+ VLOG_ERR("Failed to enable receiving packets in dpif.");
+ return error;
+ }
+ dpif_flow_flush(backer->dpif);
+ backer->need_revalidate = REV_RECONFIGURE;
+ }
+
if (backer->need_revalidate
|| !tag_set_is_empty(&backer->revalidate_set)) {
struct tag_set revalidate_set = backer->revalidate_set;
}
}
- if (timer_expired(&backer->next_expiration)) {
+ if (!backer->recv_set_enable) {
+ /* Wake up within at most 1000 ms. */
+ timer_set_duration(&backer->next_expiration, 1000);
+ } else if (timer_expired(&backer->next_expiration)) {
int delay = expire(backer);
timer_set_duration(&backer->next_expiration, delay);
}
{
unsigned int work;
+ /* If recv_set_enable is false, we should not handle upcalls. */
+ if (!backer->recv_set_enable) {
+ return 0;
+ }
+
/* Handle one or more batches of upcalls, until there's nothing left to do
* or until we do a fixed total amount of work.
*
backer->need_revalidate = 0;
simap_init(&backer->tnl_backers);
tag_set_init(&backer->revalidate_set);
+ backer->recv_set_enable = !ofproto_get_flow_restore_wait();
*backerp = backer;
- dpif_flow_flush(backer->dpif);
+ if (backer->recv_set_enable) {
+ dpif_flow_flush(backer->dpif);
+ }
/* Loop through the ports already on the datapath and remove any
* that we don't need anymore. */
shash_add(&all_dpif_backers, type, backer);
- error = dpif_recv_set(backer->dpif, true);
+ error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
if (error) {
VLOG_ERR("failed to listen on datapath of type %s: %s",
type, strerror(error));
ofpbuf_clear(&ofpacts);
error = add_internal_flow(ofproto, id++, &ofpacts,
&ofproto->no_packet_in_rule);
+ if (error) {
+ return error;
+ }
+
+ error = add_internal_flow(ofproto, id++, &ofpacts,
+ &ofproto->drop_frags_rule);
return error;
}
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct ofport_dpif *ofport;
+ /* Do not perform any periodic activity required by 'ofproto' while
+ * waiting for flow restore to complete. */
+ if (ofproto_get_flow_restore_wait()) {
+ return 0;
+ }
+
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
port_run_fast(ofport);
}
complete_operations(ofproto);
}
+ /* Do not perform any of the periodic activity below that is required by
+ * 'ofproto' while waiting for flow restore to complete. */
+ if (ofproto_get_flow_restore_wait()) {
+ return 0;
+ }
+
error = run_fast(ofproto_);
if (error) {
return error;
poll_immediate_wake();
}
+ if (ofproto_get_flow_restore_wait()) {
+ return;
+ }
+
dpif_wait(ofproto->backer->dpif);
dpif_recv_wait(ofproto->backer->dpif);
if (ofproto->sflow) {
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct dpif_dp_stats s;
- uint64_t n_miss, n_no_pkt_in, n_bytes;
+ uint64_t n_miss, n_no_pkt_in, n_bytes, n_dropped_frags;
uint64_t n_lookup;
strcpy(ots->name, "classifier");
dpif_get_dp_stats(ofproto->backer->dpif, &s);
rule_get_stats(&ofproto->miss_rule->up, &n_miss, &n_bytes);
rule_get_stats(&ofproto->no_packet_in_rule->up, &n_no_pkt_in, &n_bytes);
+ rule_get_stats(&ofproto->drop_frags_rule->up, &n_dropped_frags, &n_bytes);
- n_lookup = s.n_hit + s.n_missed;
+ n_lookup = s.n_hit + s.n_missed - n_dropped_frags;
ots->lookup_count = htonll(n_lookup);
ots->matched_count = htonll(n_lookup - n_miss - n_no_pkt_in);
}
if (port->bundle && port->bundle->bond) {
bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
}
+
+ if (port->cfm) {
+ cfm_set_netdev(port->cfm, port->up.netdev);
+ }
}
static void
struct initial_vals initial_vals;
struct list packets;
enum dpif_upcall_type upcall_type;
- uint32_t odp_in_port;
};
struct flow_miss_op {
* flow keys with fitness ODP_FIT_TO_LITTLE. This breaks a fundamental
* assumption used throughout the facet and subfacet handling code.
* Since we have to handle these misses in userspace anyway, we simply
- * skip facet creation, avoiding the problem alltogether. */
+ * skip facet creation, avoiding the problem altogether. */
if (miss->key_fitness == ODP_FIT_TOO_LITTLE
|| !flow_miss_should_make_facet(ofproto, miss, hash)) {
handle_flow_miss_without_facet(miss, ops, n_ops);
if (error == ENODEV) {
struct drop_key *drop_key;
- /* Received packet on port for which we couldn't associate
- * an ofproto. This can happen if a port is removed while
- * traffic is being received. Print a rate-limited message
+ /* Received packet on datapath port for which we couldn't
+ * associate an ofproto. This can happen if a port is removed
+ * while traffic is being received. Print a rate-limited message
* in case it happens frequently. Install a drop flow so
* that future packets of the flow are inexpensively dropped
* in the kernel. */
- VLOG_INFO_RL(&rl, "received packet on unassociated port %"PRIu32,
- flow.in_port);
+ VLOG_INFO_RL(&rl, "received packet on unassociated datapath port "
+ "%"PRIu32, odp_in_port);
drop_key = drop_key_lookup(backer, upcall->key, upcall->key_len);
if (!drop_key) {
miss->key = upcall->key;
miss->key_len = upcall->key_len;
miss->upcall_type = upcall->type;
- miss->odp_in_port = odp_in_port;
list_init(&miss->packets);
n_misses++;
const struct dpif_flow_stats *stats)
{
struct facet *facet = subfacet->facet;
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
+ struct dpif_flow_stats diff;
+
+ diff.tcp_flags = stats->tcp_flags;
+ diff.used = stats->used;
if (stats->n_packets >= subfacet->dp_packet_count) {
- uint64_t extra = stats->n_packets - subfacet->dp_packet_count;
- facet->packet_count += extra;
+ diff.n_packets = stats->n_packets - subfacet->dp_packet_count;
} else {
VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
+ diff.n_packets = 0;
}
if (stats->n_bytes >= subfacet->dp_byte_count) {
- facet->byte_count += stats->n_bytes - subfacet->dp_byte_count;
+ diff.n_bytes = stats->n_bytes - subfacet->dp_byte_count;
} else {
VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
+ diff.n_bytes = 0;
}
+ ofproto->n_hit += diff.n_packets;
subfacet->dp_packet_count = stats->n_packets;
subfacet->dp_byte_count = stats->n_bytes;
+ subfacet_update_stats(subfacet, &diff);
- facet->tcp_flags |= stats->tcp_flags;
-
- subfacet_update_time(subfacet, stats->used);
if (facet->accounted_bytes < facet->byte_count) {
facet_learn(facet);
facet_account(facet);
while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
struct flow flow;
struct subfacet *subfacet;
- struct ofport_dpif *ofport;
uint32_t key_hash;
if (ofproto_receive(backer, NULL, key, key_len, &flow, NULL, &ofproto,
ofproto->total_subfacet_count += hmap_count(&ofproto->subfacets);
ofproto->n_update_stats++;
- ofport = get_ofp_port(ofproto, flow.in_port);
- if (ofport && ofport->tnl_port) {
- netdev_vport_inc_rx(ofport->up.netdev, stats);
- }
-
key_hash = odp_flow_key_hash(key, key_len);
subfacet = subfacet_find(ofproto, key, key_len, key_hash);
switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
case SF_FAST_PATH:
- /* Update ofproto_dpif's hit count. */
- if (stats->n_packets > subfacet->dp_packet_count) {
- uint64_t delta = stats->n_packets - subfacet->dp_packet_count;
- dpif_stats_update_hit_count(ofproto, delta);
- }
-
update_subfacet_stats(subfacet, stats);
break;
static void
facet_learn(struct facet *facet)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
long long int now = time_msec();
- struct xlate_in xin;
if (!facet->xout.has_fin_timeout && now < facet->learn_rl) {
return;
return;
}
- xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
- facet->rule, facet->tcp_flags, NULL);
- xin.may_learn = true;
- xlate_actions_for_side_effects(&xin);
+ facet_push_stats(facet, true);
}
static void
ovs_assert(!subfacet->dp_packet_count);
}
- facet_push_stats(facet);
+ facet_push_stats(facet, false);
if (facet->accounted_bytes < facet->byte_count) {
facet_account(facet);
facet->accounted_bytes = facet->byte_count;
|| tag_set_intersects(&ofproto->backer->revalidate_set,
facet->xout.tags))
&& !facet_revalidate(facet)) {
- facet_revalidate(facet);
-
- /* facet_revalidate() may have destroyed 'facet'. */
- facet = facet_find(ofproto, flow, hash);
+ return NULL;
}
return facet;
return true;
}
-/* Updates 'facet''s used time. Caller is responsible for calling
- * facet_push_stats() to update the flows which 'facet' resubmits into. */
-static void
-facet_update_time(struct facet *facet, long long int used)
-{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
- if (used > facet->used) {
- facet->used = used;
- ofproto_rule_update_used(&facet->rule->up, used);
- netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
- }
-}
-
static void
facet_reset_counters(struct facet *facet)
{
}
static void
-facet_push_stats(struct facet *facet)
+facet_push_stats(struct facet *facet, bool may_learn)
{
struct dpif_flow_stats stats;
stats.n_packets = facet->packet_count - facet->prev_packet_count;
stats.n_bytes = facet->byte_count - facet->prev_byte_count;
stats.used = facet->used;
- stats.tcp_flags = 0;
+ stats.tcp_flags = facet->tcp_flags;
+
+ if (may_learn || stats.n_packets || facet->used > facet->prev_used) {
+ struct ofproto_dpif *ofproto =
+ ofproto_dpif_cast(facet->rule->up.ofproto);
+
+ struct ofport_dpif *in_port;
+ struct xlate_in xin;
- if (stats.n_packets || stats.n_bytes || facet->used > facet->prev_used) {
facet->prev_packet_count = facet->packet_count;
facet->prev_byte_count = facet->byte_count;
facet->prev_used = facet->used;
- rule_credit_stats(facet->rule, &stats);
- flow_push_stats(facet, &stats);
+ in_port = get_ofp_port(ofproto, facet->flow.in_port);
+ if (in_port && in_port->tnl_port) {
+ netdev_vport_inc_rx(in_port->up.netdev, &stats);
+ }
- update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
- facet->xout.mirrors, stats.n_packets,
+ rule_credit_stats(facet->rule, &stats);
+ netflow_flow_update_time(ofproto->netflow, &facet->nf_flow,
+ facet->used);
+ netflow_flow_update_flags(&facet->nf_flow, facet->tcp_flags);
+ update_mirror_stats(ofproto, facet->xout.mirrors, stats.n_packets,
stats.n_bytes);
+
+ xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
+ facet->rule, stats.tcp_flags, NULL);
+ xin.resubmit_stats = &stats;
+ xin.may_learn = may_learn;
+ xlate_actions_for_side_effects(&xin);
}
}
struct facet *facet;
HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
- facet_push_stats(facet);
+ facet_push_stats(facet, false);
if (run_fast) {
run_fast_rl();
}
rule->byte_count += stats->n_bytes;
ofproto_rule_update_used(&rule->up, stats->used);
}
-
-/* Pushes flow statistics to the rules which 'facet->flow' resubmits
- * into given 'facet->rule''s actions and mirrors. */
-static void
-flow_push_stats(struct facet *facet, const struct dpif_flow_stats *stats)
-{
- struct rule_dpif *rule = facet->rule;
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- struct xlate_in xin;
-
- ofproto_rule_update_used(&rule->up, stats->used);
-
- xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals, rule,
- 0, NULL);
- xin.resubmit_stats = stats;
- xlate_actions_for_side_effects(&xin);
-}
\f
/* Subfacets. */
subfacet->dp_packet_count = 0;
subfacet->dp_byte_count = 0;
subfacet->path = SF_NOT_INSTALLED;
- subfacet->odp_in_port = miss->odp_in_port;
ofproto->subfacet_add_count++;
return subfacet;
subfacet->dp_byte_count = 0;
}
-/* Updates 'subfacet''s used time. The caller is responsible for calling
- * facet_push_stats() to update the flows which 'subfacet' resubmits into. */
-static void
-subfacet_update_time(struct subfacet *subfacet, long long int used)
-{
- if (used > subfacet->used) {
- subfacet->used = used;
- facet_update_time(subfacet->facet, used);
- }
-}
-
/* Folds the statistics from 'stats' into the counters in 'subfacet'.
*
* Because of the meaning of a subfacet's counters, it only makes sense to do
if (stats->n_packets || stats->used > subfacet->used) {
struct facet *facet = subfacet->facet;
- subfacet_update_time(subfacet, stats->used);
+ subfacet->used = MAX(subfacet->used, stats->used);
+ facet->used = MAX(facet->used, stats->used);
facet->packet_count += stats->n_packets;
facet->byte_count += stats->n_bytes;
facet->tcp_flags |= stats->tcp_flags;
- netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
}
}
\f
{
struct cls_rule *cls_rule;
struct classifier *cls;
+ bool frag;
if (table_id >= N_TABLES) {
return NULL;
}
cls = &ofproto->up.tables[table_id].cls;
- if (flow->nw_frag & FLOW_NW_FRAG_ANY
- && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
- /* For OFPC_NORMAL frag_handling, we must pretend that transport ports
- * are unavailable. */
+ frag = (flow->nw_frag & FLOW_NW_FRAG_ANY) != 0;
+ if (frag && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
+ /* We must pretend that transport ports are unavailable. */
struct flow ofpc_normal_flow = *flow;
ofpc_normal_flow.tp_src = htons(0);
ofpc_normal_flow.tp_dst = htons(0);
cls_rule = classifier_lookup(cls, &ofpc_normal_flow);
+ } else if (frag && ofproto->up.frag_handling == OFPC_FRAG_DROP) {
+ cls_rule = &ofproto->drop_frags_rule->up.cr;
} else {
cls_rule = classifier_lookup(cls, flow);
}
&ctx->xout->odp_actions);
ctx->xin->flow.tunnel = flow_tnl; /* Restore tunnel metadata */
} else {
+ uint16_t vlandev_port;
odp_port = ofport->odp_port;
- out_port = vsp_realdev_to_vlandev(ctx->ofproto, odp_port,
- ctx->xin->flow.vlan_tci);
- if (out_port != odp_port) {
+ vlandev_port = vsp_realdev_to_vlandev(ctx->ofproto, ofp_port,
+ ctx->xin->flow.vlan_tci);
+ if (vlandev_port == ofp_port) {
+ out_port = odp_port;
+ } else {
+ out_port = ofp_port_to_odp_port(ctx->ofproto, vlandev_port);
ctx->xin->flow.vlan_tci = htons(0);
}
ctx->xin->flow.skb_mark &= ~IPSEC_MARK;
{
struct ofputil_packet_in pin;
struct ofpbuf *packet;
+ struct flow key;
ovs_assert(!ctx->xout->slow || ctx->xout->slow == SLOW_CONTROLLER);
ctx->xout->slow = SLOW_CONTROLLER;
packet = ofpbuf_clone(ctx->xin->packet);
- if (packet->l2 && packet->l3) {
- struct eth_header *eh;
- uint16_t mpls_depth;
-
- eth_pop_vlan(packet);
- eh = packet->l2;
-
- memcpy(eh->eth_src, ctx->xin->flow.dl_src, sizeof eh->eth_src);
- memcpy(eh->eth_dst, ctx->xin->flow.dl_dst, sizeof eh->eth_dst);
-
- if (ctx->xin->flow.vlan_tci & htons(VLAN_CFI)) {
- eth_push_vlan(packet, ctx->xin->flow.vlan_tci);
- }
-
- mpls_depth = eth_mpls_depth(packet);
-
- if (mpls_depth < ctx->xin->flow.mpls_depth) {
- push_mpls(packet, ctx->xin->flow.dl_type, ctx->xin->flow.mpls_lse);
- } else if (mpls_depth > ctx->xin->flow.mpls_depth) {
- pop_mpls(packet, ctx->xin->flow.dl_type);
- } else if (mpls_depth) {
- set_mpls_lse(packet, ctx->xin->flow.mpls_lse);
- }
+ key.skb_priority = 0;
+ key.skb_mark = 0;
+ memset(&key.tunnel, 0, sizeof key.tunnel);
- if (packet->l4) {
- if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
- packet_set_ipv4(packet, ctx->xin->flow.nw_src,
- ctx->xin->flow.nw_dst, ctx->xin->flow.nw_tos,
- ctx->xin->flow.nw_ttl);
- }
+ commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+ &ctx->xout->odp_actions);
- if (packet->l7) {
- if (ctx->xin->flow.nw_proto == IPPROTO_TCP) {
- packet_set_tcp_port(packet, ctx->xin->flow.tp_src,
- ctx->xin->flow.tp_dst);
- } else if (ctx->xin->flow.nw_proto == IPPROTO_UDP) {
- packet_set_udp_port(packet, ctx->xin->flow.tp_src,
- ctx->xin->flow.tp_dst);
- }
- }
- }
- }
+ odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data,
+ ctx->xout->odp_actions.size, NULL, NULL);
pin.packet = packet->data;
pin.packet_len = packet->size;
} else if (rule == ofproto->no_packet_in_rule) {
ds_put_cstr(ds, "\nNo match, packets dropped because "
"OFPPC_NO_PACKET_IN is set on in_port.\n");
+ } else if (rule == ofproto->drop_frags_rule) {
+ ds_put_cstr(ds, "\nPackets dropped because they are IP fragments "
+ "and the fragment handling mode is \"drop\".\n");
}
if (rule) {
return hash_2words(realdev_ofp_port, vid);
}
-/* Returns the ODP port number of the Linux VLAN device that corresponds to
- * 'vlan_tci' on the network device with port number 'realdev_odp_port' in
- * 'ofproto'. For example, given 'realdev_odp_port' of eth0 and 'vlan_tci' 9,
- * it would return the port number of eth0.9.
+/* Returns the OFP port number of the Linux VLAN device that corresponds to
+ * 'vlan_tci' on the network device with port number 'realdev_ofp_port' in
+ * 'ofproto'. For example, given 'realdev_ofp_port' of eth0 and
+ * 'vlan_tci' 9, it would return the port number of eth0.9.
*
- * Unless VLAN splinters are enabled for port 'realdev_odp_port', this
- * function just returns its 'realdev_odp_port' argument. */
-static uint32_t
+ * Unless VLAN splinters are enabled for port 'realdev_ofp_port', this
+ * function just returns its 'realdev_ofp_port' argument. */
+static uint16_t
vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
- uint32_t realdev_odp_port, ovs_be16 vlan_tci)
+ uint16_t realdev_ofp_port, ovs_be16 vlan_tci)
{
if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
- uint16_t realdev_ofp_port;
int vid = vlan_tci_to_vid(vlan_tci);
const struct vlan_splinter *vsp;
- realdev_ofp_port = odp_port_to_ofp_port(ofproto, realdev_odp_port);
HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
hash_realdev_vid(realdev_ofp_port, vid),
&ofproto->realdev_vid_map) {
if (vsp->realdev_ofp_port == realdev_ofp_port
&& vsp->vid == vid) {
- return ofp_port_to_odp_port(ofproto, vsp->vlandev_ofp_port);
+ return vsp->vlandev_ofp_port;
}
}
}
- return realdev_odp_port;
+ return realdev_ofp_port;
}
static struct vlan_splinter *
}
}
-static void
-dpif_stats_update_hit_count(struct ofproto_dpif *ofproto, uint64_t delta)
-{
- ofproto->n_hit += delta;
-}
-
const struct ofproto_class ofproto_dpif_class = {
init,
enumerate_types,