+
+ return ofproto_dpif_execute_actions(xport->xbridge->ofproto, &flow, NULL,
+ &output.ofpact, sizeof output,
+ packet);
+}
+
+struct xlate_cache *
+xlate_cache_new(void)
+{
+ struct xlate_cache *xcache = xmalloc(sizeof *xcache);
+
+ ofpbuf_init(&xcache->entries, 512);
+ return xcache;
+}
+
+static struct xc_entry *
+xlate_cache_add_entry(struct xlate_cache *xcache, enum xc_type type)
+{
+ struct xc_entry *entry;
+
+ entry = ofpbuf_put_zeros(&xcache->entries, sizeof *entry);
+ entry->type = type;
+
+ return entry;
+}
+
+static void
+xlate_cache_netdev(struct xc_entry *entry, const struct dpif_flow_stats *stats)
+{
+ if (entry->u.dev.tx) {
+ netdev_vport_inc_tx(entry->u.dev.tx, stats);
+ }
+ if (entry->u.dev.rx) {
+ netdev_vport_inc_rx(entry->u.dev.rx, stats);
+ }
+ if (entry->u.dev.bfd) {
+ bfd_account_rx(entry->u.dev.bfd, stats);
+ }
+}
+
+static void
+xlate_cache_normal(struct ofproto_dpif *ofproto, struct flow *flow, int vlan)
+{
+ struct xbridge *xbridge;
+ struct xbundle *xbundle;
+ struct flow_wildcards wc;
+
+ xbridge = xbridge_lookup(ofproto);
+ if (!xbridge) {
+ return;
+ }
+
+ xbundle = lookup_input_bundle(xbridge, flow->in_port.ofp_port, false,
+ NULL);
+ if (!xbundle) {
+ return;
+ }
+
+ update_learning_table(xbridge, flow, &wc, vlan, xbundle);
+}
+
/* Push stats and perform side effects of flow translation.
 *
 * Walks every entry cached in 'xcache' and replays its statistics-related
 * side effect against 'stats': crediting rules and bonds, bumping netdev and
 * netflow counters, updating mirror stats, refreshing learned rules (only
 * when 'may_learn' is true), re-learning MACs for NORMAL actions, and
 * applying FIN timeouts.  No references held by 'xcache' are released here;
 * that is xlate_cache_clear()'s job. */
void
xlate_push_stats(struct xlate_cache *xcache, bool may_learn,
                 const struct dpif_flow_stats *stats)
{
    struct xc_entry *entry;
    /* Local copy of the ofpbuf header so that iteration does not disturb
     * 'xcache->entries' itself — presumably XC_ENTRY_FOR_EACH consumes the
     * buffer it is given (NOTE(review): confirm against the macro's
     * definition). */
    struct ofpbuf entries = xcache->entries;

    XC_ENTRY_FOR_EACH (entry, entries, xcache) {
        switch (entry->type) {
        case XC_RULE:
            rule_dpif_credit_stats(entry->u.rule, stats);
            break;
        case XC_BOND:
            bond_account(entry->u.bond.bond, entry->u.bond.flow,
                         entry->u.bond.vid, stats->n_bytes);
            break;
        case XC_NETDEV:
            xlate_cache_netdev(entry, stats);
            break;
        case XC_NETFLOW:
            netflow_flow_update(entry->u.nf.netflow, entry->u.nf.flow,
                                entry->u.nf.iface, stats);
            break;
        case XC_MIRROR:
            mirror_update_stats(entry->u.mirror.mbridge,
                                entry->u.mirror.mirrors,
                                stats->n_packets, stats->n_bytes);
            break;
        case XC_LEARN:
            if (may_learn) {
                struct rule_dpif *rule = entry->u.learn.rule;

                /* Reset the modified time for a rule that is equivalent to
                 * the currently cached rule. If the rule is not the exact
                 * rule we have cached, update the reference that we have. */
                entry->u.learn.rule = ofproto_dpif_refresh_rule(rule);
            }
            break;
        case XC_NORMAL:
            xlate_cache_normal(entry->u.normal.ofproto, entry->u.normal.flow,
                               entry->u.normal.vlan);
            break;
        case XC_FIN_TIMEOUT:
            xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
                                entry->u.fin.idle, entry->u.fin.hard);
            break;
        default:
            OVS_NOT_REACHED();
        }
    }
}
+
+static void
+xlate_dev_unref(struct xc_entry *entry)
+{
+ if (entry->u.dev.tx) {
+ netdev_close(entry->u.dev.tx);
+ }
+ if (entry->u.dev.rx) {
+ netdev_close(entry->u.dev.rx);
+ }
+ if (entry->u.dev.bfd) {
+ bfd_unref(entry->u.dev.bfd);
+ }
+}
+
/* Tears down the netflow state cached by a XC_NETFLOW entry.
 *
 * Call order matters: 'netflow' is expired and cleared for 'flow' first,
 * since netflow_unref() may release the last reference and destroy
 * 'netflow'.  'flow' is owned by the cache entry and is freed here. */
static void
xlate_cache_clear_netflow(struct netflow *netflow, struct flow *flow)
{
    netflow_expire(netflow, flow);
    netflow_flow_clear(netflow, flow);
    netflow_unref(netflow);
    free(flow);
}
+
/* Releases every reference and allocation held by the entries in 'xcache'
 * and empties the entry buffer, leaving the cache ready for reuse.
 *
 * A null 'xcache' is a no-op.  This only drops the entries; the cache
 * structure itself is freed by xlate_cache_delete(). */
void
xlate_cache_clear(struct xlate_cache *xcache)
{
    struct xc_entry *entry;
    struct ofpbuf entries;

    if (!xcache) {
        return;
    }

    XC_ENTRY_FOR_EACH (entry, entries, xcache) {
        switch (entry->type) {
        case XC_RULE:
            rule_dpif_unref(entry->u.rule);
            break;
        case XC_BOND:
            /* The cache owns the flow copy; the bond is reference counted. */
            free(entry->u.bond.flow);
            bond_unref(entry->u.bond.bond);
            break;
        case XC_NETDEV:
            xlate_dev_unref(entry);
            break;
        case XC_NETFLOW:
            xlate_cache_clear_netflow(entry->u.nf.netflow, entry->u.nf.flow);
            break;
        case XC_MIRROR:
            mbridge_unref(entry->u.mirror.mbridge);
            break;
        case XC_LEARN:
            /* 'u.learn.rule' is the learned rule. */
            rule_dpif_unref(entry->u.learn.rule);
            break;
        case XC_NORMAL:
            free(entry->u.normal.flow);
            break;
        case XC_FIN_TIMEOUT:
            /* 'u.fin.rule' is always already held as a XC_RULE, which
             * has already released its reference above. */
            break;
        default:
            OVS_NOT_REACHED();
        }
    }

    ofpbuf_clear(&xcache->entries);
}
+
+void
+xlate_cache_delete(struct xlate_cache *xcache)
+{
+ xlate_cache_clear(xcache);
+ ofpbuf_uninit(&xcache->entries);
+ free(xcache);