struct xport *peer; /* Patch port peer or null. */
enum ofputil_port_config config; /* OpenFlow port configuration. */
+ enum ofputil_port_state state; /* OpenFlow port state. */
int stp_port_no; /* STP port number or -1 if not in use. */
struct hmap skb_priorities; /* Map of 'skb_priority_to_dscp's. */
const struct cfm *cfm, const struct bfd *bfd,
struct ofport_dpif *peer, int stp_port_no,
const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
- enum ofputil_port_config config, bool is_tunnel,
+ enum ofputil_port_config config,
+ enum ofputil_port_state state, bool is_tunnel,
bool may_enable)
{
struct xport *xport = xport_lookup(ofport);
ovs_assert(xport->ofp_port == ofp_port);
xport->config = config;
+ xport->state = state;
xport->stp_port_no = stp_port_no;
xport->is_tunnel = is_tunnel;
xport->may_enable = may_enable;
return xport ? xport->odp_port : ODPP_NONE;
}
+/* Returns true if OpenFlow port 'ofp_port' on 'ctx''s bridge exists, is not
+ * administratively down (OFPUTIL_PC_PORT_DOWN), and its link is up (not
+ * OFPUTIL_PS_LINK_DOWN).  Used to evaluate fast-failover liveness. */
+static bool
+odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
+{
+ struct xport *xport;
+
+ xport = get_ofp_port(ctx->xbridge, ofp_port);
+ if (!xport || xport->config & OFPUTIL_PC_PORT_DOWN ||
+ xport->state & OFPUTIL_PS_LINK_DOWN) {
+ return false;
+ }
+
+ return true;
+}
+
+static const struct ofputil_bucket *
+group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
+ int depth);
+
+/* Returns true if group 'group_id' exists on 'ctx''s bridge and contains at
+ * least one live bucket (see bucket_is_alive()).  'depth' is the current
+ * watch-group recursion depth, bounded by MAX_LIVENESS_RECURSION.  The
+ * reference taken by group_dpif_lookup() is released before returning. */
+static bool
+group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
+{
+ struct group_dpif *group;
+ bool hit;
+
+ hit = group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group);
+ if (!hit) {
+ return false;
+ }
+
+ hit = group_first_live_bucket(ctx, group, depth) != NULL;
+
+ group_dpif_release(group);
+ return hit;
+}
+
+/* Upper bound on watch-group chaining, so that liveness evaluation of
+ * mutually-referencing groups cannot recurse without limit. */
+#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */
+
+/* Returns true if 'bucket' is considered live:
+ *
+ *   - it does not track liveness at all
+ *     (!ofputil_bucket_has_liveness()), or
+ *   - its watch port is set (!= OFPP_ANY) and that port is alive, or
+ *   - its watch group is set (!= OFPG_ANY) and that group has a live
+ *     bucket.
+ *
+ * Watch-group chains deeper than MAX_LIVENESS_RECURSION are logged
+ * (rate-limited) and treated as dead. */
+static bool
+bucket_is_alive(const struct xlate_ctx *ctx,
+ const struct ofputil_bucket *bucket, int depth)
+{
+ if (depth >= MAX_LIVENESS_RECURSION) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ VLOG_WARN_RL(&rl, "bucket chaining exceeded %d links",
+ MAX_LIVENESS_RECURSION);
+ return false;
+ }
+
+ return !ofputil_bucket_has_liveness(bucket) ||
+ (bucket->watch_port != OFPP_ANY &&
+ odp_port_is_alive(ctx, bucket->watch_port)) ||
+ (bucket->watch_group != OFPG_ANY &&
+ group_is_alive(ctx, bucket->watch_group, depth + 1));
+}
+
+/* Returns the first live bucket in 'group' (in bucket-list order), or NULL
+ * if no bucket is live.  'depth' is the current watch-group recursion depth,
+ * passed through to bucket_is_alive(). */
+static const struct ofputil_bucket *
+group_first_live_bucket(const struct xlate_ctx *ctx,
+ const struct group_dpif *group, int depth)
+{
+ struct ofputil_bucket *bucket;
+ const struct list *buckets;
+
+ group_dpif_get_buckets(group, &buckets);
+ LIST_FOR_EACH (bucket, list_node, buckets) {
+ if (bucket_is_alive(ctx, bucket, depth)) {
+ return bucket;
+ }
+ }
+
+ return NULL;
+}
+
+/* Returns the live bucket in 'group' with the highest score for 'basis', or
+ * NULL if no bucket is live.  A bucket at position 'i' scores
+ * (hash_int(i, basis) & 0xffff) * weight, which biases selection toward
+ * higher-weight buckets while remaining deterministic for a given 'basis';
+ * ties go to the later bucket (the '>=' comparison).  Note that a live
+ * bucket with weight 0 can still be chosen if no bucket scores higher. */
+static const struct ofputil_bucket *
+group_best_live_bucket(const struct xlate_ctx *ctx,
+ const struct group_dpif *group,
+ uint32_t basis)
+{
+ const struct ofputil_bucket *best_bucket = NULL;
+ uint32_t best_score = 0;
+ int i = 0;
+
+ const struct ofputil_bucket *bucket;
+ const struct list *buckets;
+
+ group_dpif_get_buckets(group, &buckets);
+ LIST_FOR_EACH (bucket, list_node, buckets) {
+ if (bucket_is_alive(ctx, bucket, 0)) {
+ uint32_t score = (hash_int(i, basis) & 0xffff) * bucket->weight;
+ if (score >= best_score) {
+ best_bucket = bucket;
+ best_score = score;
+ }
+ }
+ i++;
+ }
+
+ return best_bucket;
+}
+
static bool
xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
{
/* If 'struct flow' gets additional metadata, we'll need to zero it out
* before traversing a patch port. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 22);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 23);
if (!xport) {
xlate_report(ctx, "Nonexistent output port");
ctx->exit = true;
}
+/* Translates the actions of a single group 'bucket'.  A bucket's ofpacts are
+ * stored in action-set form, so they are first converted to an executable
+ * action list (ofpacts_execute_action_set()) and then translated like
+ * ordinary actions.  ctx->recurse is bumped around the translation so depth
+ * limits apply to group expansion just as they do to resubmits. */
+static void
+xlate_group_bucket(struct xlate_ctx *ctx, const struct ofputil_bucket *bucket)
+{
+ uint64_t action_list_stub[1024 / 8];
+ struct ofpbuf action_list, action_set;
+
+ ofpbuf_use_const(&action_set, bucket->ofpacts, bucket->ofpacts_len);
+ ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
+
+ ofpacts_execute_action_set(&action_list, &action_set);
+ ctx->recurse++;
+ do_xlate_actions(action_list.data, action_list.size, ctx);
+ ctx->recurse--;
+
+ ofpbuf_uninit(&action_set);
+ ofpbuf_uninit(&action_list);
+}
+
+/* Translates an OFPGT11_ALL (or OFPGT11_INDIRECT) group by executing every
+ * bucket in turn.  The flow is saved on entry and restored after each bucket
+ * so that every bucket sees the packet as it was when the group was reached,
+ * rather than as modified by earlier buckets. */
+static void
+xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+ const struct ofputil_bucket *bucket;
+ const struct list *buckets;
+ struct flow old_flow = ctx->xin->flow;
+
+ group_dpif_get_buckets(group, &buckets);
+
+ LIST_FOR_EACH (bucket, list_node, buckets) {
+ xlate_group_bucket(ctx, bucket);
+ /* Roll back flow to previous state.
+ * This is equivalent to cloning the packet for each bucket.
+ *
+ * As a side effect any subsequently applied actions will
+ * also effectively be applied to a clone of the packet taken
+ * just before applying the all or indirect group. */
+ ctx->xin->flow = old_flow;
+ }
+}
+
+/* Translates an OFPGT11_FF (fast failover) group: executes only the first
+ * live bucket, or does nothing at all if no bucket is live. */
+static void
+xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+ const struct ofputil_bucket *bucket;
+
+ bucket = group_first_live_bucket(ctx, group, 0);
+ if (bucket) {
+ xlate_group_bucket(ctx, bucket);
+ }
+}
+
+/* Translates an OFPGT11_SELECT group: chooses the best live bucket using a
+ * hash of the flow's Ethernet destination as the selection basis, then
+ * executes it.  dl_dst is unwildcarded because the bucket choice depends on
+ * it.  NOTE(review): only dl_dst feeds the hash, so flows sharing a
+ * destination MAC always pick the same bucket. */
+static void
+xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+ struct flow_wildcards *wc = &ctx->xout->wc;
+ const struct ofputil_bucket *bucket;
+ uint32_t basis;
+
+ basis = hash_bytes(ctx->xin->flow.dl_dst, sizeof ctx->xin->flow.dl_dst, 0);
+ bucket = group_best_live_bucket(ctx, group, basis);
+ if (bucket) {
+ memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
+ xlate_group_bucket(ctx, bucket);
+ }
+}
+
+/* Translates 'group' according to its type and releases the reference the
+ * caller obtained from group_dpif_lookup().  INDIRECT groups share the ALL
+ * path: per the OpenFlow spec an indirect group has a single bucket, so
+ * "execute every bucket" degenerates to executing that one bucket. */
+static void
+xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+ switch (group_dpif_get_type(group)) {
+ case OFPGT11_ALL:
+ case OFPGT11_INDIRECT:
+ xlate_all_group(ctx, group);
+ break;
+ case OFPGT11_SELECT:
+ xlate_select_group(ctx, group);
+ break;
+ case OFPGT11_FF:
+ xlate_ff_group(ctx, group);
+ break;
+ default:
+ NOT_REACHED();
+ }
+ group_dpif_release(group);
+}
+
+/* Translates an OFPACT_GROUP action referencing 'group_id'.
+ *
+ * Returns true if 'group_id' does not exist, signalling the caller to stop
+ * translating the remaining actions.  Returns false if the group was
+ * translated, or if the resubmit resource check failed (in which case the
+ * group action is skipped and translation continues). */
+static bool
+xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id)
+{
+ if (xlate_resubmit_resource_check(ctx)) {
+ struct group_dpif *group;
+ bool got_group;
+
+ got_group = group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group);
+ if (got_group) {
+ xlate_group_action__(ctx, group);
+ } else {
+ return true;
+ }
+ }
+
+ return false;
+}
+
static void
xlate_ofpact_resubmit(struct xlate_ctx *ctx,
const struct ofpact_resubmit *resubmit)
break;
case OFPACT_GROUP:
- /* XXX not yet implemented */
+ if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id)) {
+ return;
+ }
break;
case OFPACT_CONTROLLER: