+static void
+xlate_group_bucket(struct xlate_ctx *ctx, const struct ofputil_bucket *bucket)
+{
+    uint64_t actions_stub[1024 / 8];
+    struct ofpbuf action_set, action_list;
+
+    /* Wrap the bucket's actions (read-only) as an action set and expand it
+     * into an equivalent action list in a stack-backed buffer. */
+    ofpbuf_use_const(&action_set, bucket->ofpacts, bucket->ofpacts_len);
+    ofpbuf_use_stub(&action_list, actions_stub, sizeof actions_stub);
+    ofpacts_execute_action_set(&action_list, &action_set);
+
+    /* Translating the bucket counts against the resubmit depth limit. */
+    ctx->recurse++;
+    do_xlate_actions(action_list.data, action_list.size, ctx);
+    ctx->recurse--;
+
+    ofpbuf_uninit(&action_list);
+    ofpbuf_uninit(&action_set);
+}
+
+static void
+xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+    /* Translates an OFPGT11_ALL or OFPGT11_INDIRECT group: every bucket is
+     * applied as if to its own clone of the packet. */
+    struct flow saved_flow = ctx->xin->flow;
+    const struct ofputil_bucket *bucket;
+    const struct list *buckets;
+
+    group_dpif_get_buckets(group, &buckets);
+    LIST_FOR_EACH (bucket, list_node, buckets) {
+        xlate_group_bucket(ctx, bucket);
+        /* Roll the flow back to its pre-bucket state.  This is equivalent
+         * to cloning the packet for each bucket.
+         *
+         * As a side effect, any actions applied after the group will also
+         * effectively operate on a clone of the packet taken just before
+         * the all or indirect group was applied. */
+        ctx->xin->flow = saved_flow;
+    }
+}
+
+static void
+xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+    /* Fast-failover group: translate only the first live bucket, if there
+     * is one; otherwise the packet is effectively dropped by the group. */
+    const struct ofputil_bucket *live = group_first_live_bucket(ctx, group, 0);
+
+    if (live) {
+        xlate_group_bucket(ctx, live);
+    }
+}
+
+static void
+xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+    struct flow_wildcards *wc = &ctx->xout->wc;
+    const struct ofputil_bucket *bucket;
+
+    /* Select group: pick the best live bucket using a hash of the Ethernet
+     * destination address as the selection basis. */
+    uint32_t basis = hash_bytes(ctx->xin->flow.dl_dst,
+                                sizeof ctx->xin->flow.dl_dst, 0);
+    bucket = group_best_live_bucket(ctx, group, basis);
+    if (bucket) {
+        /* The bucket choice depended on dl_dst, so unwildcard it. */
+        memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
+        xlate_group_bucket(ctx, bucket);
+    }
+}
+
+static void
+xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+    /* Translates 'group' according to its type and releases the caller's
+     * reference to it.  'in_group' is held true for the duration so that
+     * xlate_group_resource_check() rejects nested group translation. */
+    int type = group_dpif_get_type(group);
+
+    ctx->in_group = true;
+    if (type == OFPGT11_ALL || type == OFPGT11_INDIRECT) {
+        xlate_all_group(ctx, group);
+    } else if (type == OFPGT11_SELECT) {
+        xlate_select_group(ctx, group);
+    } else if (type == OFPGT11_FF) {
+        xlate_ff_group(ctx, group);
+    } else {
+        OVS_NOT_REACHED();
+    }
+    group_dpif_release(group);
+    ctx->in_group = false;
+}
+
+static bool
+xlate_group_resource_check(struct xlate_ctx *ctx)
+{
+    /* Returns true if it is OK to translate a group action now: the generic
+     * resubmit limits must hold and we must not already be translating a
+     * group. */
+    if (!xlate_resubmit_resource_check(ctx)) {
+        return false;
+    }
+
+    if (ctx->in_group) {
+        /* Prevent nested translation of OpenFlow groups.
+         *
+         * OpenFlow allows this restriction.  We enforce this restriction
+         * only because, with the current architecture, we would otherwise
+         * have to take a possibly recursive read lock on the ofgroup
+         * rwlock, which is unsafe given that POSIX allows taking a read
+         * lock to block if there is a thread blocked on taking the write
+         * lock.  Other solutions without this restriction are also
+         * possible, but seem unwarranted given the current limited use of
+         * groups. */
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+        VLOG_ERR_RL(&rl, "cannot recursively translate OpenFlow group");
+        return false;
+    }
+
+    return true;
+}
+
+static bool
+xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id)
+{
+    /* Translates the group with id 'group_id'.  Returns true only when
+     * the resource check passed but no such group exists in the bridge;
+     * returns false otherwise (including after successful translation). */
+    struct group_dpif *group;
+
+    if (!xlate_group_resource_check(ctx)) {
+        return false;
+    }
+
+    if (!group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group)) {
+        return true;
+    }
+
+    xlate_group_action__(ctx, group);
+    return false;
+}
+