+static struct meter *
+meter_create(const struct ofputil_meter_config *config,
+ ofproto_meter_id provider_meter_id)
+{
+ struct meter *meter;
+
+ meter = xzalloc(sizeof *meter);
+ meter->provider_meter_id = provider_meter_id;
+ meter->created = time_msec();
+ list_init(&meter->rules);
+
+ meter_update(meter, config);
+
+ return meter;
+}
+
+static void
+meter_delete(struct ofproto *ofproto, uint32_t first, uint32_t last)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ uint32_t mid;
+ for (mid = first; mid <= last; ++mid) {
+ struct meter *meter = ofproto->meters[mid];
+ if (meter) {
+ ofproto->meters[mid] = NULL;
+ ofproto->ofproto_class->meter_del(ofproto,
+ meter->provider_meter_id);
+ free(meter->bands);
+ free(meter);
+ }
+ }
+}
+
+static enum ofperr
+handle_add_meter(struct ofproto *ofproto, struct ofputil_meter_mod *mm)
+{
+ ofproto_meter_id provider_meter_id = { UINT32_MAX };
+ struct meter **meterp = &ofproto->meters[mm->meter.meter_id];
+ enum ofperr error;
+
+ if (*meterp) {
+ return OFPERR_OFPMMFC_METER_EXISTS;
+ }
+
+ error = ofproto->ofproto_class->meter_set(ofproto, &provider_meter_id,
+ &mm->meter);
+ if (!error) {
+ ovs_assert(provider_meter_id.uint32 != UINT32_MAX);
+ *meterp = meter_create(&mm->meter, provider_meter_id);
+ }
+ return error;
+}
+
+static enum ofperr
+handle_modify_meter(struct ofproto *ofproto, struct ofputil_meter_mod *mm)
+{
+ struct meter *meter = ofproto->meters[mm->meter.meter_id];
+ enum ofperr error;
+ uint32_t provider_meter_id;
+
+ if (!meter) {
+ return OFPERR_OFPMMFC_UNKNOWN_METER;
+ }
+
+ provider_meter_id = meter->provider_meter_id.uint32;
+ error = ofproto->ofproto_class->meter_set(ofproto,
+ &meter->provider_meter_id,
+ &mm->meter);
+ ovs_assert(meter->provider_meter_id.uint32 == provider_meter_id);
+ if (!error) {
+ meter_update(meter, &mm->meter);
+ }
+ return error;
+}
+
/* Implements OFPMC13_DELETE.  Deletes meter 'mm->meter.meter_id', or every
 * meter if it is OFPM13_ALL, along with all flows that use a deleted meter.
 * Returns 0 on success, OFPROTO_POSTPONE if any affected rule is still being
 * modified, or an OpenFlow error code. */
static enum ofperr
handle_delete_meter(struct ofconn *ofconn, const struct ofp_header *oh,
                    struct ofputil_meter_mod *mm)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    uint32_t meter_id = mm->meter.meter_id;
    struct rule_collection rules;
    enum ofperr error = 0;
    uint32_t first, last;

    if (meter_id == OFPM13_ALL) {
        first = 1;
        last = ofproto->meter_features.max_meters;
    } else {
        /* Deleting a nonexistent or out-of-range meter is treated as a
         * no-op, not an error. */
        if (!meter_id || meter_id > ofproto->meter_features.max_meters) {
            return 0;
        }
        first = last = meter_id;
    }

    /* First delete the rules that use this meter.  If any of those rules are
     * currently being modified, postpone the whole operation until later. */
    rule_collection_init(&rules);
    ovs_mutex_lock(&ofproto_mutex);
    for (meter_id = first; meter_id <= last; ++meter_id) {
        struct meter *meter = ofproto->meters[meter_id];
        if (meter && !list_is_empty(&meter->rules)) {
            struct rule *rule;

            LIST_FOR_EACH (rule, meter_list_node, &meter->rules) {
                if (rule->pending) {
                    /* Bail out before mutating anything; the collection is
                     * destroyed below without deleting its rules. */
                    error = OFPROTO_POSTPONE;
                    goto exit;
                }
                rule_collection_add(&rules, rule);
            }
        }
    }
    if (rules.n > 0) {
        delete_flows__(ofproto, ofconn, oh, &rules, OFPRR_METER_DELETE);
    }

    /* Delete the meters. */
    meter_delete(ofproto, first, last);

exit:
    ovs_mutex_unlock(&ofproto_mutex);
    rule_collection_destroy(&rules);

    return error;
}
+
/* Top-level handler for an OpenFlow meter-mod message: decodes it, validates
 * the meter id and band count, and dispatches on the meter-mod command.
 * Returns 0 on success or an OpenFlow error code. */
static enum ofperr
handle_meter_mod(struct ofconn *ofconn, const struct ofp_header *oh)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct ofputil_meter_mod mm;
    uint64_t bands_stub[256 / 8];   /* Stack storage for typical band lists. */
    struct ofpbuf bands;
    uint32_t meter_id;
    enum ofperr error;

    error = reject_slave_controller(ofconn);
    if (error) {
        return error;
    }

    ofpbuf_use_stub(&bands, bands_stub, sizeof bands_stub);

    error = ofputil_decode_meter_mod(oh, &mm, &bands);
    if (error) {
        goto exit_free_bands;
    }

    meter_id = mm.meter.meter_id;

    /* Id and band validation is skipped for OFPMC13_DELETE: the delete
     * handler itself treats out-of-range ids as a no-op. */
    if (mm.command != OFPMC13_DELETE) {
        /* Fails also when meters are not implemented by the provider. */
        if (meter_id == 0 || meter_id > OFPM13_MAX) {
            error = OFPERR_OFPMMFC_INVALID_METER;
            goto exit_free_bands;
        } else if (meter_id > ofproto->meter_features.max_meters) {
            error = OFPERR_OFPMMFC_OUT_OF_METERS;
            goto exit_free_bands;
        }
        if (mm.meter.n_bands > ofproto->meter_features.max_bands) {
            error = OFPERR_OFPMMFC_OUT_OF_BANDS;
            goto exit_free_bands;
        }
    }

    switch (mm.command) {
    case OFPMC13_ADD:
        error = handle_add_meter(ofproto, &mm);
        break;

    case OFPMC13_MODIFY:
        error = handle_modify_meter(ofproto, &mm);
        break;

    case OFPMC13_DELETE:
        error = handle_delete_meter(ofconn, oh, &mm);
        break;

    default:
        error = OFPERR_OFPMMFC_BAD_COMMAND;
        break;
    }

exit_free_bands:
    ofpbuf_uninit(&bands);
    return error;
}
+
+static enum ofperr
+handle_meter_features_request(struct ofconn *ofconn,
+ const struct ofp_header *request)
+{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+ struct ofputil_meter_features features;
+ struct ofpbuf *b;
+
+ if (ofproto->ofproto_class->meter_get_features) {
+ ofproto->ofproto_class->meter_get_features(ofproto, &features);
+ } else {
+ memset(&features, 0, sizeof features);
+ }
+ b = ofputil_encode_meter_features_reply(&features, request);
+
+ ofconn_send_reply(ofconn, b);
+ return 0;
+}
+
/* Replies to an OpenFlow meter stats or meter config request ('type' selects
 * which).  A request for OFPM13_ALL iterates over every possible meter id;
 * otherwise the single requested meter must exist.  Returns 0 on success or
 * OFPERR_OFPMMFC_UNKNOWN_METER for a bad single-meter request. */
static enum ofperr
handle_meter_request(struct ofconn *ofconn, const struct ofp_header *request,
                     enum ofptype type)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct list replies;
    uint64_t bands_stub[256 / 8];   /* Stack storage for typical band lists. */
    struct ofpbuf bands;
    uint32_t meter_id, first, last;

    ofputil_decode_meter_request(request, &meter_id);

    if (meter_id == OFPM13_ALL) {
        first = 1;
        last = ofproto->meter_features.max_meters;
    } else {
        if (!meter_id || meter_id > ofproto->meter_features.max_meters ||
            !ofproto->meters[meter_id]) {
            return OFPERR_OFPMMFC_UNKNOWN_METER;
        }
        first = last = meter_id;
    }

    ofpbuf_use_stub(&bands, bands_stub, sizeof bands_stub);
    ofpmp_init(&replies, request);

    for (meter_id = first; meter_id <= last; ++meter_id) {
        struct meter *meter = ofproto->meters[meter_id];
        if (!meter) {
            continue; /* Skip non-existing meters. */
        }
        if (type == OFPTYPE_METER_STATS_REQUEST) {
            struct ofputil_meter_stats stats;

            stats.meter_id = meter_id;

            /* Provider sets the packet and byte counts, we do the rest. */
            stats.flow_count = list_size(&meter->rules);
            calc_duration(meter->created, time_msec(),
                          &stats.duration_sec, &stats.duration_nsec);
            stats.n_bands = meter->n_bands;
            /* 'bands' is reused across iterations; only its current
             * contents are appended to the reply below. */
            ofpbuf_clear(&bands);
            stats.bands
                = ofpbuf_put_uninit(&bands,
                                    meter->n_bands * sizeof *stats.bands);

            /* Meters whose stats the provider cannot supply are silently
             * omitted from the reply. */
            if (!ofproto->ofproto_class->meter_get(ofproto,
                                                   meter->provider_meter_id,
                                                   &stats)) {
                ofputil_append_meter_stats(&replies, &stats);
            }
        } else { /* type == OFPTYPE_METER_CONFIG_REQUEST */
            struct ofputil_meter_config config;

            config.meter_id = meter_id;
            config.flags = meter->flags;
            config.n_bands = meter->n_bands;
            config.bands = meter->bands;
            ofputil_append_meter_config(&replies, &config);
        }
    }

    ofconn_send_replies(ofconn, &replies);
    ofpbuf_uninit(&bands);
    return 0;
}
+
/* Looks up the group with 'group_id' in 'ofproto'.  On success stores the
 * group in '*group', returns true, and leaves the group read-locked; the
 * caller must release it with ofproto_group_release().  On failure returns
 * false with no locks held.  The container lock is dropped before returning
 * in either case. */
bool
ofproto_group_lookup(const struct ofproto *ofproto, uint32_t group_id,
                     struct ofgroup **group)
    OVS_TRY_RDLOCK(true, (*group)->rwlock)
{
    ovs_rwlock_rdlock(&ofproto->groups_rwlock);
    HMAP_FOR_EACH_IN_BUCKET (*group, hmap_node,
                             hash_int(group_id, 0), &ofproto->groups) {
        if ((*group)->group_id == group_id) {
            /* Take the group's lock before dropping the container's, so the
             * group cannot be destroyed in between. */
            ovs_rwlock_rdlock(&(*group)->rwlock);
            ovs_rwlock_unlock(&ofproto->groups_rwlock);
            return true;
        }
    }
    ovs_rwlock_unlock(&ofproto->groups_rwlock);
    return false;
}
+
/* Releases the read lock taken on 'group' by ofproto_group_lookup(). */
void
ofproto_group_release(struct ofgroup *group)
    OVS_RELEASES(group->rwlock)
{
    ovs_rwlock_unlock(&group->rwlock);
}
+
/* Looks up the group with 'group_id' for modification.  On success stores
 * the group in '*group' and returns true with BOTH 'ofproto->groups_rwlock'
 * and the group's own rwlock held for writing; the caller must release both.
 * On failure returns false with no locks held. */
static bool
ofproto_group_write_lookup(const struct ofproto *ofproto, uint32_t group_id,
                           struct ofgroup **group)
    OVS_TRY_WRLOCK(true, ofproto->groups_rwlock)
    OVS_TRY_WRLOCK(true, (*group)->rwlock)
{
    ovs_rwlock_wrlock(&ofproto->groups_rwlock);
    HMAP_FOR_EACH_IN_BUCKET (*group, hmap_node,
                             hash_int(group_id, 0), &ofproto->groups) {
        if ((*group)->group_id == group_id) {
            /* Intentionally keeps the container lock: modification also
             * touches ofproto->n_groups[], which it protects. */
            ovs_rwlock_wrlock(&(*group)->rwlock);
            return true;
        }
    }
    ovs_rwlock_unlock(&ofproto->groups_rwlock);
    return false;
}
+
+static bool
+ofproto_group_exists__(const struct ofproto *ofproto, uint32_t group_id)
+ OVS_REQ_RDLOCK(ofproto->groups_rwlock)
+{
+ struct ofgroup *grp;
+
+ HMAP_FOR_EACH_IN_BUCKET (grp, hmap_node,
+ hash_int(group_id, 0), &ofproto->groups) {
+ if (grp->group_id == group_id) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool
+ofproto_group_exists(const struct ofproto *ofproto, uint32_t group_id)
+ OVS_EXCLUDED(ofproto->groups_rwlock)
+{
+ bool exists;
+
+ ovs_rwlock_rdlock(&ofproto->groups_rwlock);
+ exists = ofproto_group_exists__(ofproto, group_id);
+ ovs_rwlock_unlock(&ofproto->groups_rwlock);
+
+ return exists;
+}
+
/* Returns the number of flows that output to 'group', found by collecting
 * all rules whose actions reference the group id.  Returns UINT32_MAX if the
 * rules could not be collected or the count does not fit in 32 bits. */
static uint32_t
group_get_ref_count(struct ofgroup *group)
    OVS_EXCLUDED(ofproto_mutex)
{
    struct ofproto *ofproto = group->ofproto;
    struct rule_criteria criteria;
    struct rule_collection rules;
    struct match match;
    enum ofperr error;
    uint32_t count;

    /* Match every flow in every table (0xff), restricted only by output
     * group. */
    match_init_catchall(&match);
    rule_criteria_init(&criteria, 0xff, &match, 0, htonll(0), htonll(0),
                       OFPP_ANY, group->group_id);
    ovs_mutex_lock(&ofproto_mutex);
    error = collect_rules_loose(ofproto, &criteria, &rules);
    ovs_mutex_unlock(&ofproto_mutex);
    rule_criteria_destroy(&criteria);

    count = !error && rules.n < UINT32_MAX ? rules.n : UINT32_MAX;

    rule_collection_destroy(&rules);
    return count;
}
+
/* Appends statistics for 'group' to the multipart reply list 'replies'.
 * The caller must hold a read lock on 'group->rwlock'. */
static void
append_group_stats(struct ofgroup *group, struct list *replies)
    OVS_REQ_RDLOCK(group->rwlock)
{
    struct ofputil_group_stats ogs;
    struct ofproto *ofproto = group->ofproto;
    long long int now = time_msec();
    int error;

    ogs.bucket_stats = xmalloc(group->n_buckets * sizeof *ogs.bucket_stats);

    /* Provider sets the packet and byte counts, we do the rest. */
    ogs.ref_count = group_get_ref_count(group);
    ogs.n_buckets = group->n_buckets;

    error = (ofproto->ofproto_class->group_get_stats
             ? ofproto->ofproto_class->group_get_stats(group, &ogs)
             : EOPNOTSUPP);
    if (error) {
        /* Stats unavailable: report all-ones counters, which OpenFlow uses
         * to mean "unsupported". */
        ogs.packet_count = UINT64_MAX;
        ogs.byte_count = UINT64_MAX;
        memset(ogs.bucket_stats, 0xff,
               ogs.n_buckets * sizeof *ogs.bucket_stats);
    }

    ogs.group_id = group->group_id;
    calc_duration(group->created, now, &ogs.duration_sec, &ogs.duration_nsec);

    ofputil_append_group_stats(replies, &ogs);

    free(ogs.bucket_stats);
}
+
/* Replies to an OpenFlow group stats request, either for a single group or,
 * for OFPG_ALL, every group in the switch.  An unknown single group yields
 * an empty (but still valid) multipart reply. */
static enum ofperr
handle_group_stats_request(struct ofconn *ofconn,
                           const struct ofp_header *request)
{
    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
    struct list replies;
    enum ofperr error;
    struct ofgroup *group;
    uint32_t group_id;

    error = ofputil_decode_group_stats_request(request, &group_id);
    if (error) {
        return error;
    }

    ofpmp_init(&replies, request);

    if (group_id == OFPG_ALL) {
        /* Hold the container read lock across the walk; each group is
         * additionally read-locked while its stats are gathered. */
        ovs_rwlock_rdlock(&ofproto->groups_rwlock);
        HMAP_FOR_EACH (group, hmap_node, &ofproto->groups) {
            ovs_rwlock_rdlock(&group->rwlock);
            append_group_stats(group, &replies);
            ovs_rwlock_unlock(&group->rwlock);
        }
        ovs_rwlock_unlock(&ofproto->groups_rwlock);
    } else {
        if (ofproto_group_lookup(ofproto, group_id, &group)) {
            /* Lookup returns with the group read-locked. */
            append_group_stats(group, &replies);
            ofproto_group_release(group);
        }
    }

    ofconn_send_replies(ofconn, &replies);

    return 0;
}
+
+static enum ofperr
+handle_group_desc_stats_request(struct ofconn *ofconn,
+ const struct ofp_header *request)
+{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+ struct list replies;
+ struct ofputil_group_desc gds;
+ struct ofgroup *group;
+
+ ofpmp_init(&replies, request);
+
+ ovs_rwlock_rdlock(&ofproto->groups_rwlock);
+ HMAP_FOR_EACH (group, hmap_node, &ofproto->groups) {
+ gds.group_id = group->group_id;
+ gds.type = group->type;
+ ofputil_append_group_desc_reply(&gds, &group->buckets, &replies);
+ }
+ ovs_rwlock_unlock(&ofproto->groups_rwlock);
+
+ ofconn_send_replies(ofconn, &replies);
+
+ return 0;
+}
+
+static enum ofperr
+handle_group_features_stats_request(struct ofconn *ofconn,
+ const struct ofp_header *request)
+{
+ struct ofproto *p = ofconn_get_ofproto(ofconn);
+ struct ofpbuf *msg;
+
+ msg = ofputil_encode_group_features_reply(&p->ogf, request);
+ if (msg) {
+ ofconn_send_reply(ofconn, msg);
+ }
+
+ return 0;
+}
+
/* Replies to an OpenFlow queue-get-config request by dumping the queues of
 * the netdev behind the requested port.  Returns 0 on success, a decode
 * error, or OFPERR_OFPQOFC_BAD_PORT for an unknown port. */
static enum ofperr
handle_queue_get_config_request(struct ofconn *ofconn,
                                const struct ofp_header *oh)
{
    struct ofproto *p = ofconn_get_ofproto(ofconn);
    struct netdev_queue_dump queue_dump;
    struct ofport *ofport;
    unsigned int queue_id;
    struct ofpbuf *reply;
    struct smap details;
    ofp_port_t request;
    enum ofperr error;

    error = ofputil_decode_queue_get_config_request(oh, &request);
    if (error) {
        return error;
    }

    ofport = ofproto_get_port(p, request);
    if (!ofport) {
        return OFPERR_OFPQOFC_BAD_PORT;
    }

    reply = ofputil_encode_queue_get_config_reply(oh);

    /* 'details' receives each queue's properties from the dump, but they are
     * not translated into the reply (see the comment below). */
    smap_init(&details);
    NETDEV_QUEUE_FOR_EACH (&queue_id, &details, &queue_dump, ofport->netdev) {
        struct ofputil_queue_config queue;

        /* None of the existing queues have compatible properties, so we
         * hard-code omitting min_rate and max_rate. */
        queue.queue_id = queue_id;
        queue.min_rate = UINT16_MAX;
        queue.max_rate = UINT16_MAX;
        ofputil_append_queue_get_config_reply(reply, &queue);
    }
    smap_destroy(&details);

    ofconn_send_reply(ofconn, reply);

    return 0;
}
+
/* Implements OFPGC11_ADD, adding the group specified by 'gm' to the switch,
 * which must not already contain a group with the same ID.
 *
 * Returns 0 on success or an OpenFlow error code on failure. */
+static enum ofperr
+add_group(struct ofproto *ofproto, struct ofputil_group_mod *gm)
+{
+ struct ofgroup *ofgroup;
+ enum ofperr error;
+
+ if (gm->group_id > OFPG_MAX) {
+ return OFPERR_OFPGMFC_INVALID_GROUP;
+ }
+ if (gm->type > OFPGT11_FF) {
+ return OFPERR_OFPGMFC_BAD_TYPE;
+ }
+
+ /* Allocate new group and initialize it. */
+ ofgroup = ofproto->ofproto_class->group_alloc();
+ if (!ofgroup) {
+ VLOG_WARN_RL(&rl, "%s: failed to create group", ofproto->name);
+ return OFPERR_OFPGMFC_OUT_OF_GROUPS;
+ }
+
+ ovs_rwlock_init(&ofgroup->rwlock);
+ ofgroup->ofproto = ofproto;
+ ofgroup->group_id = gm->group_id;
+ ofgroup->type = gm->type;
+ ofgroup->created = ofgroup->modified = time_msec();
+
+ list_move(&ofgroup->buckets, &gm->buckets);
+ ofgroup->n_buckets = list_size(&ofgroup->buckets);
+
+ /* Construct called BEFORE any locks are held. */
+ error = ofproto->ofproto_class->group_construct(ofgroup);
+ if (error) {
+ goto free_out;
+ }
+
+ /* We wrlock as late as possible to minimize the time we jam any other
+ * threads: No visible state changes before acquiring the lock. */
+ ovs_rwlock_wrlock(&ofproto->groups_rwlock);
+
+ if (ofproto->n_groups[gm->type] >= ofproto->ogf.max_groups[gm->type]) {
+ error = OFPERR_OFPGMFC_OUT_OF_GROUPS;
+ goto unlock_out;
+ }
+
+ if (ofproto_group_exists__(ofproto, gm->group_id)) {
+ error = OFPERR_OFPGMFC_GROUP_EXISTS;
+ goto unlock_out;
+ }
+
+ if (!error) {
+ /* Insert new group. */
+ hmap_insert(&ofproto->groups, &ofgroup->hmap_node,
+ hash_int(ofgroup->group_id, 0));
+ ofproto->n_groups[ofgroup->type]++;
+
+ ovs_rwlock_unlock(&ofproto->groups_rwlock);
+ return error;
+ }
+
+ unlock_out:
+ ovs_rwlock_unlock(&ofproto->groups_rwlock);
+ ofproto->ofproto_class->group_destruct(ofgroup);
+ free_out:
+ ofputil_bucket_list_destroy(&ofgroup->buckets);
+ ofproto->ofproto_class->group_dealloc(ofgroup);
+
+ return error;
+}
+
/* Implements OFPGC11_MODIFY, replacing the type and buckets of the existing
 * group identified by 'gm->group_id' with those in 'gm'.  Returns 0 on
 * success or an OpenFlow error code on failure. */
static enum ofperr
modify_group(struct ofproto *ofproto, struct ofputil_group_mod *gm)
{
    struct ofgroup *ofgroup;
    struct ofgroup *victim;   /* Scratch copy of the pre-modify group state,
                               * kept so the change can be rolled back. */
    enum ofperr error;

    if (gm->group_id > OFPG_MAX) {
        return OFPERR_OFPGMFC_INVALID_GROUP;
    }

    if (gm->type > OFPGT11_FF) {
        return OFPERR_OFPGMFC_BAD_TYPE;
    }

    victim = ofproto->ofproto_class->group_alloc();
    if (!victim) {
        VLOG_WARN_RL(&rl, "%s: failed to allocate group", ofproto->name);
        return OFPERR_OFPGMFC_OUT_OF_GROUPS;
    }

    if (!ofproto_group_write_lookup(ofproto, gm->group_id, &ofgroup)) {
        error = OFPERR_OFPGMFC_UNKNOWN_GROUP;
        goto free_out;
    }
    /* Both group's and its container's write locks held now.
     * Also, n_groups[] is protected by ofproto->groups_rwlock. */
    if (ofgroup->type != gm->type
        && ofproto->n_groups[gm->type] >= ofproto->ogf.max_groups[gm->type]) {
        error = OFPERR_OFPGMFC_OUT_OF_GROUPS;
        goto unlock_out;
    }

    /* Save the old state in 'victim' (buckets are moved, not copied, since
     * list nodes cannot be shallow-copied safely). */
    *victim = *ofgroup;
    list_move(&victim->buckets, &ofgroup->buckets);

    /* Apply the new type and buckets to the live group. */
    ofgroup->type = gm->type;
    list_move(&ofgroup->buckets, &gm->buckets);
    ofgroup->n_buckets = list_size(&ofgroup->buckets);

    error = ofproto->ofproto_class->group_modify(ofgroup, victim);
    if (!error) {
        /* Commit: discard the saved buckets and fix the per-type counts. */
        ofputil_bucket_list_destroy(&victim->buckets);
        ofproto->n_groups[victim->type]--;
        ofproto->n_groups[ofgroup->type]++;
        ofgroup->modified = time_msec();
    } else {
        /* Roll back: restore the saved state into the live group. */
        ofputil_bucket_list_destroy(&ofgroup->buckets);

        *ofgroup = *victim;
        list_move(&ofgroup->buckets, &victim->buckets);
    }

 unlock_out:
    ovs_rwlock_unlock(&ofgroup->rwlock);
    ovs_rwlock_unlock(&ofproto->groups_rwlock);
 free_out:
    ofproto->ofproto_class->group_dealloc(victim);
    return error;
}
+
/* Deletes 'ofgroup' and every flow that outputs to it.  Called with
 * 'ofproto->groups_rwlock' held for writing; RELEASES it before returning. */
static void
delete_group__(struct ofproto *ofproto, struct ofgroup *ofgroup)
    OVS_RELEASES(ofproto->groups_rwlock)
{
    struct match match;
    struct ofputil_flow_mod fm;

    /* Delete all flow entries containing this group in a group action */
    match_init_catchall(&match);
    flow_mod_init(&fm, &match, 0, NULL, 0, OFPFC_DELETE);
    fm.out_group = ofgroup->group_id;
    handle_flow_mod__(ofproto, NULL, &fm, NULL);

    /* Must wait until existing readers are done,
     * while holding the container's write lock at the same time. */
    ovs_rwlock_wrlock(&ofgroup->rwlock);
    hmap_remove(&ofproto->groups, &ofgroup->hmap_node);
    /* No-one can find this group any more. */
    ofproto->n_groups[ofgroup->type]--;
    ovs_rwlock_unlock(&ofproto->groups_rwlock);

    ofproto->ofproto_class->group_destruct(ofgroup);
    ofputil_bucket_list_destroy(&ofgroup->buckets);
    /* Drop and destroy the group's own lock only after it is unreachable. */
    ovs_rwlock_unlock(&ofgroup->rwlock);
    ovs_rwlock_destroy(&ofgroup->rwlock);
    ofproto->ofproto_class->group_dealloc(ofgroup);
}
+
/* Implements OFPGC_DELETE.  Deletes the group with 'group_id', or every
 * group if 'group_id' is OFPG_ALL.  Deleting a nonexistent group is a
 * no-op. */
static void
delete_group(struct ofproto *ofproto, uint32_t group_id)
{
    struct ofgroup *ofgroup;

    ovs_rwlock_wrlock(&ofproto->groups_rwlock);
    if (group_id == OFPG_ALL) {
        for (;;) {
            struct hmap_node *node = hmap_first(&ofproto->groups);
            if (!node) {
                break;
            }
            ofgroup = CONTAINER_OF(node, struct ofgroup, hmap_node);
            /* delete_group__() releases groups_rwlock... */
            delete_group__(ofproto, ofgroup);
            /* Lock for each node separately, so that we will not jam the
             * other threads for too long time. */
            ovs_rwlock_wrlock(&ofproto->groups_rwlock);
        }
    } else {
        HMAP_FOR_EACH_IN_BUCKET (ofgroup, hmap_node,
                                 hash_int(group_id, 0), &ofproto->groups) {
            if (ofgroup->group_id == group_id) {
                /* delete_group__() releases groups_rwlock, so return here
                 * without unlocking again. */
                delete_group__(ofproto, ofgroup);
                return;
            }
        }
    }
    ovs_rwlock_unlock(&ofproto->groups_rwlock);
}
+
+static enum ofperr
+handle_group_mod(struct ofconn *ofconn, const struct ofp_header *oh)
+{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+ struct ofputil_group_mod gm;
+ enum ofperr error;
+
+ error = reject_slave_controller(ofconn);
+ if (error) {
+ return error;
+ }
+
+ error = ofputil_decode_group_mod(oh, &gm);
+ if (error) {
+ return error;
+ }
+
+ switch (gm.command) {
+ case OFPGC11_ADD:
+ return add_group(ofproto, &gm);
+
+ case OFPGC11_MODIFY:
+ return modify_group(ofproto, &gm);
+
+ case OFPGC11_DELETE:
+ delete_group(ofproto, gm.group_id);
+ return 0;
+
+ default:
+ if (gm.command > OFPGC11_DELETE) {
+ VLOG_WARN_RL(&rl, "%s: Invalid group_mod command type %d",
+ ofproto->name, gm.command);
+ }
+ return OFPERR_OFPGMFC_BAD_COMMAND;
+ }
+}
+
/* Returns the current configuration of table 'table_id' in 'ofproto'.
 * NOTE(review): no bounds check against ofproto->n_tables is visible here —
 * presumably callers guarantee 'table_id' is valid; confirm at call sites. */
enum ofproto_table_config
ofproto_table_get_config(const struct ofproto *ofproto, uint8_t table_id)
{
    unsigned int value;
    atomic_read(&ofproto->tables[table_id].config, &value);
    return (enum ofproto_table_config)value;
}
+
+static enum ofperr
+table_mod(struct ofproto *ofproto, const struct ofputil_table_mod *tm)
+{
+ /* Only accept currently supported configurations */
+ if (tm->config & ~OFPTC11_TABLE_MISS_MASK) {
+ return OFPERR_OFPTMFC_BAD_CONFIG;
+ }
+
+ if (tm->table_id == OFPTT_ALL) {
+ int i;
+ for (i = 0; i < ofproto->n_tables; i++) {
+ atomic_store(&ofproto->tables[i].config,
+ (unsigned int)tm->config);
+ }
+ } else if (!check_table_id(ofproto, tm->table_id)) {
+ return OFPERR_OFPTMFC_BAD_TABLE;
+ } else {
+ atomic_store(&ofproto->tables[tm->table_id].config,
+ (unsigned int)tm->config);
+ }
+
+ return 0;
+}
+
+static enum ofperr
+handle_table_mod(struct ofconn *ofconn, const struct ofp_header *oh)
+{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+ struct ofputil_table_mod tm;
+ enum ofperr error;
+
+ error = reject_slave_controller(ofconn);
+ if (error) {
+ return error;
+ }
+
+ error = ofputil_decode_table_mod(oh, &tm);
+ if (error) {
+ return error;
+ }
+
+ return table_mod(ofproto, &tm);
+}
+
+static enum ofperr
+handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
+ OVS_EXCLUDED(ofproto_mutex)
+{
+ const struct ofp_header *oh = ofpbuf_data(msg);
+ enum ofptype type;
+ enum ofperr error;
+
+ error = ofptype_decode(&type, oh);
+ if (error) {
+ return error;
+ }
+ if (oh->version >= OFP13_VERSION && ofpmsg_is_stat_request(oh)
+ && ofpmp_more(oh)) {
+ /* We have no buffer implementation for multipart requests.
+ * Report overflow for requests which consists of multiple
+ * messages. */
+ return OFPERR_OFPBRC_MULTIPART_BUFFER_OVERFLOW;
+ }
+
+ switch (type) {
+ /* OpenFlow requests. */
+ case OFPTYPE_ECHO_REQUEST:
+ return handle_echo_request(ofconn, oh);
+
+ case OFPTYPE_FEATURES_REQUEST:
+ return handle_features_request(ofconn, oh);
+
+ case OFPTYPE_GET_CONFIG_REQUEST:
+ return handle_get_config_request(ofconn, oh);