X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ofproto%2Fofproto-dpif-xlate.c;h=367dd88e9c034cf3eac17a92332210885dbd41fa;hb=7cb279c20248abe8e1c3f0c3639c107acac40380;hp=2a8217726e007efeb461dfba4d59ab564ce16f96;hpb=097d493945869ed3c0772d5376cd5368ebab6391;p=sliver-openvswitch.git

diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
index 2a8217726..367dd88e9 100644
--- a/ofproto/ofproto-dpif-xlate.c
+++ b/ofproto/ofproto-dpif-xlate.c
@@ -49,6 +49,7 @@
 #include "vlog.h"
 
 COVERAGE_DEFINE(xlate_actions);
+COVERAGE_DEFINE(xlate_actions_oversize);
 
 VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
 
@@ -126,6 +127,7 @@ struct xport {
     struct xport *peer;              /* Patch port peer or null. */
 
     enum ofputil_port_config config; /* OpenFlow port configuration. */
+    enum ofputil_port_state state;   /* OpenFlow port state. */
     int stp_port_no;                 /* STP port number or -1 if not in use. */
 
     struct hmap skb_priorities;      /* Map of 'skb_priority_to_dscp's. */
@@ -396,7 +398,8 @@ xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
                  const struct cfm *cfm, const struct bfd *bfd,
                  struct ofport_dpif *peer, int stp_port_no,
                  const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
-                 enum ofputil_port_config config, bool is_tunnel,
+                 enum ofputil_port_config config,
+                 enum ofputil_port_state state, bool is_tunnel,
                  bool may_enable)
 {
     struct xport *xport = xport_lookup(ofport);
@@ -417,6 +420,7 @@ xlate_ofport_set(struct ofproto_dpif *ofproto, struct ofbundle *ofbundle,
     ovs_assert(xport->ofp_port == ofp_port);
 
     xport->config = config;
+    xport->state = state;
     xport->stp_port_no = stp_port_no;
     xport->is_tunnel = is_tunnel;
     xport->may_enable = may_enable;
@@ -719,6 +723,106 @@ ofp_port_to_odp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
     return xport ? xport->odp_port : ODPP_NONE;
 }
 
+static bool
+odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
+{
+    struct xport *xport;
+
+    xport = get_ofp_port(ctx->xbridge, ofp_port);
+    if (!xport || xport->config & OFPUTIL_PC_PORT_DOWN ||
+        xport->state & OFPUTIL_PS_LINK_DOWN) {
+        return false;
+    }
+
+    return true;
+}
+
+static const struct ofputil_bucket *
+group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
+                        int depth);
+
+static bool
+group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
+{
+    struct group_dpif *group;
+    bool hit;
+
+    hit = group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group);
+    if (!hit) {
+        return false;
+    }
+
+    hit = group_first_live_bucket(ctx, group, depth) != NULL;
+
+    group_dpif_release(group);
+    return hit;
+}
+
+#define MAX_LIVENESS_RECURSION 128 /* Arbitrary limit */
+
+static bool
+bucket_is_alive(const struct xlate_ctx *ctx,
+                const struct ofputil_bucket *bucket, int depth)
+{
+    if (depth >= MAX_LIVENESS_RECURSION) {
+        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+        VLOG_WARN_RL(&rl, "bucket chaining exceeded %d links",
+                     MAX_LIVENESS_RECURSION);
+        return false;
+    }
+
+    return !ofputil_bucket_has_liveness(bucket) ||
+        (bucket->watch_port != OFPP_ANY &&
+         odp_port_is_alive(ctx, bucket->watch_port)) ||
+        (bucket->watch_group != OFPG_ANY &&
+         group_is_alive(ctx, bucket->watch_group, depth + 1));
+}
+
+static const struct ofputil_bucket *
+group_first_live_bucket(const struct xlate_ctx *ctx,
+                        const struct group_dpif *group, int depth)
+{
+    struct ofputil_bucket *bucket;
+    const struct list *buckets;
+
+    group_dpif_get_buckets(group, &buckets);
+    LIST_FOR_EACH (bucket, list_node, buckets) {
+        if (bucket_is_alive(ctx, bucket, depth)) {
+            return bucket;
+        }
+    }
+
+    return NULL;
+}
+
+static const struct ofputil_bucket *
+group_best_live_bucket(const struct xlate_ctx *ctx,
+                       const struct group_dpif *group,
+                       uint32_t basis)
+{
+    const struct ofputil_bucket *best_bucket = NULL;
+    uint32_t best_score = 0;
+    int i = 0;
+
+    const struct ofputil_bucket *bucket;
+    const struct list *buckets;
+
+    group_dpif_get_buckets(group, &buckets);
+    LIST_FOR_EACH (bucket, list_node, buckets) {
+        if (bucket_is_alive(ctx, bucket, 0)) {
+            uint32_t score = (hash_int(i, basis) & 0xffff) * bucket->weight;
+            if (score >= best_score) {
+                best_bucket = bucket;
+                best_score = score;
+            }
+        }
+        i++;
+    }
+
+    return best_bucket;
+}
+
 static bool
 xbundle_trunks_vlan(const struct xbundle *bundle, uint16_t vlan)
 {
@@ -1714,9 +1818,8 @@ xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
     ctx->recurse--;
 }
 
-static void
-xlate_table_action(struct xlate_ctx *ctx,
-                   ofp_port_t in_port, uint8_t table_id, bool may_packet_in)
+static bool
+xlate_resubmit_resource_check(struct xlate_ctx *ctx)
 {
     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
 
@@ -1730,6 +1833,17 @@ xlate_table_action(struct xlate_ctx *ctx,
     } else if (ctx->stack.size >= 65536) {
         VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
     } else {
+        return true;
+    }
+
+    return false;
+}
+
+static void
+xlate_table_action(struct xlate_ctx *ctx,
+                   ofp_port_t in_port, uint8_t table_id, bool may_packet_in)
+{
+    if (xlate_resubmit_resource_check(ctx)) {
         struct rule_dpif *rule;
         ofp_port_t old_in_port = ctx->xin->flow.in_port.ofp_port;
         uint8_t old_table_id = ctx->table_id;
@@ -1775,6 +1889,109 @@ xlate_table_action(struct xlate_ctx *ctx,
     ctx->exit = true;
 }
 
+static void
+xlate_group_bucket(struct xlate_ctx *ctx, const struct ofputil_bucket *bucket)
+{
+    uint64_t action_list_stub[1024 / 8];
+    struct ofpbuf action_list, action_set;
+
+    ofpbuf_use_const(&action_set, bucket->ofpacts, bucket->ofpacts_len);
+    ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
+
+    ofpacts_execute_action_set(&action_list, &action_set);
+    ctx->recurse++;
+    do_xlate_actions(action_list.data, action_list.size, ctx);
+    ctx->recurse--;
+
+    ofpbuf_uninit(&action_set);
+    ofpbuf_uninit(&action_list);
+}
+
+static void
+xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+    const struct ofputil_bucket *bucket;
+    const struct list *buckets;
+    struct flow old_flow = ctx->xin->flow;
+
+    group_dpif_get_buckets(group, &buckets);
+
+    LIST_FOR_EACH (bucket, list_node, buckets) {
+        xlate_group_bucket(ctx, bucket);
+        /* Roll back flow to previous state.
+         * This is equivalent to cloning the packet for each bucket.
+         *
+         * As a side effect any subsequently applied actions will
+         * also effectively be applied to a clone of the packet taken
+         * just before applying the all or indirect group. */
+        ctx->xin->flow = old_flow;
+    }
+}
+
+static void
+xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+    const struct ofputil_bucket *bucket;
+
+    bucket = group_first_live_bucket(ctx, group, 0);
+    if (bucket) {
+        xlate_group_bucket(ctx, bucket);
+    }
+}
+
+static void
+xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+    struct flow_wildcards *wc = &ctx->xout->wc;
+    const struct ofputil_bucket *bucket;
+    uint32_t basis;
+
+    basis = hash_bytes(ctx->xin->flow.dl_dst, sizeof ctx->xin->flow.dl_dst, 0);
+    bucket = group_best_live_bucket(ctx, group, basis);
+    if (bucket) {
+        memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
+        xlate_group_bucket(ctx, bucket);
+    }
+}
+
+static void
+xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
+{
+    switch (group_dpif_get_type(group)) {
+    case OFPGT11_ALL:
+    case OFPGT11_INDIRECT:
+        xlate_all_group(ctx, group);
+        break;
+    case OFPGT11_SELECT:
+        xlate_select_group(ctx, group);
+        break;
+    case OFPGT11_FF:
+        xlate_ff_group(ctx, group);
+        break;
+    default:
+        NOT_REACHED();
+    }
+    group_dpif_release(group);
+}
+
+static bool
+xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id)
+{
+    if (xlate_resubmit_resource_check(ctx)) {
+        struct group_dpif *group;
+        bool got_group;
+
+        got_group = group_dpif_lookup(ctx->xbridge->ofproto, group_id, &group);
+        if (got_group) {
+            xlate_group_action__(ctx, group);
+        } else {
+            return true;
+        }
+    }
+
+    return false;
+}
+
 static void
 xlate_ofpact_resubmit(struct xlate_ctx *ctx,
                       const struct ofpact_resubmit *resubmit)
@@ -2354,7 +2571,9 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
             break;
 
         case OFPACT_GROUP:
-            /* XXX not yet implemented */
+            if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id)) {
+                return;
+            }
             break;
 
        case OFPACT_CONTROLLER:
@@ -2969,11 +3188,11 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
 
     if (nl_attr_oversized(ctx.xout->odp_actions.size)) {
         /* These datapath actions are too big for a Netlink attribute, so we
-         * can't execute them. */
-        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
-
-        VLOG_ERR_RL(&rl, "discarding oversize datapath actions");
-        ofpbuf_clear(&ctx.xout->odp_actions);
+         * can't hand them to the kernel directly. dpif_execute() can execute
+         * them one by one with help, so just mark the result as SLOW_ACTION to
+         * prevent the flow from being installed. */
+        COVERAGE_INC(xlate_actions_oversize);
+        ctx.xout->slow |= SLOW_ACTION;
     }
 
     ofpbuf_uninit(&ctx.stack);