From a6b7506dab305d91fc5f2ac6416a714e5fa09dd4 Mon Sep 17 00:00:00 2001 From: Ethan Jackson Date: Wed, 20 Nov 2013 17:12:40 -0800 Subject: [PATCH] ofproto: Remove run_fast() functions. They don't really make sense in a multithreaded architecture. Once flow miss batches are dispensed with, they will be extra useless. Signed-off-by: Ethan Jackson Acked-by: Ben Pfaff --- ofproto/ofproto-dpif.c | 107 ++++++------------------------------- ofproto/ofproto-provider.h | 18 ------- ofproto/ofproto.c | 36 ------------- ofproto/ofproto.h | 2 - vswitchd/bridge.c | 33 +----------- vswitchd/bridge.h | 1 - vswitchd/ovs-vswitchd.c | 2 - 7 files changed, 16 insertions(+), 183 deletions(-) diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c index 17ddb421d..6af49747f 100644 --- a/ofproto/ofproto-dpif.c +++ b/ofproto/ofproto-dpif.c @@ -388,8 +388,6 @@ static void port_run(struct ofport_dpif *); static int set_bfd(struct ofport *, const struct smap *); static int set_cfm(struct ofport *, const struct cfm_settings *); static void ofport_update_peer(struct ofport_dpif *); -static void run_fast_rl(void); -static int run_fast(struct ofproto *); struct dpif_completion { struct list list_node; @@ -671,6 +669,8 @@ type_run(const char *type) dpif_run(backer->dpif); + handle_upcalls(backer); + /* The most natural place to push facet statistics is when they're pulled * from the datapath. 
However, when there are many flows in the datapath, * this expensive operation can occur so frequently, that it reduces our @@ -831,7 +831,6 @@ type_run(const char *type) ovs_rwlock_unlock(&ofproto->facets.rwlock); CLS_CURSOR_FOR_EACH_SAFE (facet, next, cr, &cursor) { facet_revalidate(facet); - run_fast_rl(); } } @@ -998,44 +997,6 @@ process_dpif_port_error(struct dpif_backer *backer, int error) } } -static int -dpif_backer_run_fast(struct dpif_backer *backer) -{ - handle_upcalls(backer); - - return 0; -} - -static int -type_run_fast(const char *type) -{ - struct dpif_backer *backer; - - backer = shash_find_data(&all_dpif_backers, type); - if (!backer) { - /* This is not necessarily a problem, since backers are only - * created on demand. */ - return 0; - } - - return dpif_backer_run_fast(backer); -} - -static void -run_fast_rl(void) -{ - static long long int port_rl = LLONG_MIN; - - if (time_msec() >= port_rl) { - struct ofproto_dpif *ofproto; - - HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) { - run_fast(&ofproto->up); - } - port_rl = time_msec() + 200; - } -} - static void type_wait(const char *type) { @@ -1438,36 +1399,11 @@ destruct(struct ofproto *ofproto_) close_dpif_backer(ofproto->backer); } -static int -run_fast(struct ofproto *ofproto_) -{ - struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); - struct ofproto_packet_in *pin, *next_pin; - struct list pins; - - /* Do not perform any periodic activity required by 'ofproto' while - * waiting for flow restore to complete. 
*/ - if (ofproto_get_flow_restore_wait()) { - return 0; - } - - guarded_list_pop_all(&ofproto->pins, &pins); - LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) { - connmgr_send_packet_in(ofproto->up.connmgr, pin); - list_remove(&pin->list_node); - free(CONST_CAST(void *, pin->up.packet)); - free(pin); - } - - return 0; -} - static int run(struct ofproto *ofproto_) { struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_); uint64_t new_seq; - int error; if (mbridge_need_revalidate(ofproto->mbridge)) { ofproto->backer->need_revalidate = REV_RECONFIGURE; @@ -1476,15 +1412,19 @@ run(struct ofproto *ofproto_) ovs_rwlock_unlock(&ofproto->ml->rwlock); } - /* Do not perform any periodic activity below required by 'ofproto' while + /* Do not perform any periodic activity required by 'ofproto' while * waiting for flow restore to complete. */ - if (ofproto_get_flow_restore_wait()) { - return 0; - } + if (!ofproto_get_flow_restore_wait()) { + struct ofproto_packet_in *pin, *next_pin; + struct list pins; - error = run_fast(ofproto_); - if (error) { - return error; + guarded_list_pop_all(&ofproto->pins, &pins); + LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) { + connmgr_send_packet_in(ofproto->up.connmgr, pin); + list_remove(&pin->list_node); + free(CONST_CAST(void *, pin->up.packet)); + free(pin); + } } if (ofproto->netflow) { @@ -3643,7 +3583,6 @@ update_stats(struct dpif_backer *backer) delete_unexpected_flow(backer, key, key_len); break; } - run_fast_rl(); } dpif_flow_dump_done(&dump); } @@ -4252,7 +4191,7 @@ facet_push_stats(struct facet *facet, bool may_learn) } static void -push_all_stats__(bool run_fast) +push_all_stats(void) { static long long int rl = LLONG_MIN; struct ofproto_dpif *ofproto; @@ -4269,9 +4208,6 @@ push_all_stats__(bool run_fast) cls_cursor_init(&cursor, &ofproto->facets, NULL); CLS_CURSOR_FOR_EACH (facet, cr, &cursor) { facet_push_stats(facet, false); - if (run_fast) { - run_fast_rl(); - } } ovs_rwlock_unlock(&ofproto->facets.rwlock); } @@ 
-4279,12 +4215,6 @@ push_all_stats__(bool run_fast) rl = time_msec() + 100; } -static void -push_all_stats(void) -{ - push_all_stats__(true); -} - void rule_dpif_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats) @@ -4435,7 +4365,6 @@ subfacet_destroy_batch(struct dpif_backer *backer, subfacet_reset_dp_stats(subfacets[i], &stats[i]); subfacets[i]->path = SF_NOT_INSTALLED; subfacet_destroy(subfacets[i]); - run_fast_rl(); } } @@ -4718,11 +4647,7 @@ rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes) { struct rule_dpif *rule = rule_dpif_cast(rule_); - /* push_all_stats() can handle flow misses which, when using the learn - * action, can cause rules to be added and deleted. This can corrupt our - * caller's datastructures which assume that rule_get_stats() doesn't have - * an impact on the flow table. To be safe, we disable miss handling. */ - push_all_stats__(false); + push_all_stats(); /* Start from historical data for 'rule' itself that are no longer tracked * in facets. This counts, for example, facets that have expired. */ @@ -6213,14 +6138,12 @@ const struct ofproto_class ofproto_dpif_class = { del, port_open_type, type_run, - type_run_fast, type_wait, alloc, construct, destruct, dealloc, run, - run_fast, wait, get_memory_usage, flush, diff --git a/ofproto/ofproto-provider.h b/ofproto/ofproto-provider.h index bd4ff5fe9..713af8767 100644 --- a/ofproto/ofproto-provider.h +++ b/ofproto/ofproto-provider.h @@ -688,16 +688,6 @@ struct ofproto_class { * Returns 0 if successful, otherwise a positive errno value. */ int (*type_run)(const char *type); - /* Performs periodic activity required on ofprotos of type 'type' - * that needs to be done with the least possible latency. - * - * This is run multiple times per main loop. An ofproto provider may - * implement it or not, according to whether it provides a performance - * boost for that ofproto implementation. - * - * Returns 0 if successful, otherwise a positive errno value. 
*/ - int (*type_run_fast)(const char *type); - /* Causes the poll loop to wake up when a type 'type''s 'run' * function needs to be called, e.g. by calling the timer or fd * waiting functions in poll-loop.h. @@ -781,14 +771,6 @@ struct ofproto_class { * Returns 0 if successful, otherwise a positive errno value. */ int (*run)(struct ofproto *ofproto); - /* Performs periodic activity required by 'ofproto' that needs to be done - * with the least possible latency. - * - * This is run multiple times per main loop. An ofproto provider may - * implement it or not, according to whether it provides a performance - * boost for that ofproto implementation. */ - int (*run_fast)(struct ofproto *ofproto); - /* Causes the poll loop to wake up when 'ofproto''s 'run' function needs to * be called, e.g. by calling the timer or fd waiting functions in * poll-loop.h. */ diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c index e312bb70a..8fc9916d5 100644 --- a/ofproto/ofproto.c +++ b/ofproto/ofproto.c @@ -1388,23 +1388,6 @@ ofproto_type_run(const char *datapath_type) return error; } -int -ofproto_type_run_fast(const char *datapath_type) -{ - const struct ofproto_class *class; - int error; - - datapath_type = ofproto_normalize_type(datapath_type); - class = ofproto_class_find__(datapath_type); - - error = class->type_run_fast ? class->type_run_fast(datapath_type) : 0; - if (error && error != EAGAIN) { - VLOG_ERR_RL(&rl, "%s: type_run_fast failed (%s)", - datapath_type, ovs_strerror(error)); - } - return error; -} - void ofproto_type_wait(const char *datapath_type) { @@ -1576,25 +1559,6 @@ ofproto_run(struct ofproto *p) return error; } -/* Performs periodic activity required by 'ofproto' that needs to be done - * with the least possible latency. - * - * It makes sense to call this function a couple of times per poll loop, to - * provide a significant performance boost on some benchmarks with the - * ofproto-dpif implementation. 
*/ -int -ofproto_run_fast(struct ofproto *p) -{ - int error; - - error = p->ofproto_class->run_fast ? p->ofproto_class->run_fast(p) : 0; - if (error && error != EAGAIN) { - VLOG_ERR_RL(&rl, "%s: fastpath run failed (%s)", - p->name, ovs_strerror(error)); - } - return error; -} - void ofproto_wait(struct ofproto *p) { diff --git a/ofproto/ofproto.h b/ofproto/ofproto.h index d6ab1ae56..63ae793a5 100644 --- a/ofproto/ofproto.h +++ b/ofproto/ofproto.h @@ -165,7 +165,6 @@ struct iface_hint { void ofproto_init(const struct shash *iface_hints); int ofproto_type_run(const char *datapath_type); -int ofproto_type_run_fast(const char *datapath_type); void ofproto_type_wait(const char *datapath_type); int ofproto_create(const char *datapath, const char *datapath_type, @@ -174,7 +173,6 @@ void ofproto_destroy(struct ofproto *); int ofproto_delete(const char *name, const char *type); int ofproto_run(struct ofproto *); -int ofproto_run_fast(struct ofproto *); void ofproto_wait(struct ofproto *); bool ofproto_is_alive(const struct ofproto *); diff --git a/vswitchd/bridge.c b/vswitchd/bridge.c index eb631054e..e337ab5ac 100644 --- a/vswitchd/bridge.c +++ b/vswitchd/bridge.c @@ -1469,15 +1469,9 @@ iface_create(struct bridge *br, const struct ovsrec_interface *iface_cfg, struct port *port; int error; - /* Do the bits that can fail up front. - * - * It's a bit dangerous to call bridge_run_fast() here as ofproto's - * internal datastructures may not be consistent. Eventually, when port - * additions and deletions are cheaper, these calls should be removed. */ - bridge_run_fast(); + /* Do the bits that can fail up front. 
*/ ovs_assert(!iface_lookup(br, iface_cfg->name)); error = iface_do_create(br, iface_cfg, port_cfg, &ofp_port, &netdev); - bridge_run_fast(); if (error) { iface_set_ofport(iface_cfg, OFPP_NONE); iface_clear_db_record(iface_cfg); @@ -2258,31 +2252,6 @@ instant_stats_wait(void) } } -/* Performs periodic activity required by bridges that needs to be done with - * the least possible latency. - * - * It makes sense to call this function a couple of times per poll loop, to - * provide a significant performance boost on some benchmarks with ofprotos - * that use the ofproto-dpif implementation. */ -void -bridge_run_fast(void) -{ - struct sset types; - const char *type; - struct bridge *br; - - sset_init(&types); - ofproto_enumerate_types(&types); - SSET_FOR_EACH (type, &types) { - ofproto_type_run_fast(type); - } - sset_destroy(&types); - - HMAP_FOR_EACH (br, node, &all_bridges) { - ofproto_run_fast(br->ofproto); - } -} - void bridge_run(void) { diff --git a/vswitchd/bridge.h b/vswitchd/bridge.h index c1b0a2b52..7bffee8ad 100644 --- a/vswitchd/bridge.h +++ b/vswitchd/bridge.h @@ -22,7 +22,6 @@ void bridge_init(const char *remote); void bridge_exit(void); void bridge_run(void); -void bridge_run_fast(void); void bridge_wait(void); void bridge_get_memory_usage(struct simap *usage); diff --git a/vswitchd/ovs-vswitchd.c b/vswitchd/ovs-vswitchd.c index bc45dac46..990e58f0a 100644 --- a/vswitchd/ovs-vswitchd.c +++ b/vswitchd/ovs-vswitchd.c @@ -114,9 +114,7 @@ main(int argc, char *argv[]) memory_report(&usage); simap_destroy(&usage); } - bridge_run_fast(); bridge_run(); - bridge_run_fast(); unixctl_server_run(unixctl); netdev_run(); -- 2.43.0