2 # NEPI, a framework to manage network experiments
3 # Copyright (C) 2013 INRIA
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
15 # You should have received a copy of the GNU General Public License
16 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 # Author: Alina Quereilhac <alina.quereilhac@inria.fr>
import math
import tempfile
import time

import numpy

from nepi.execution.ec import ExperimentController
class ExperimentRunner(object):
    """ The ExperimentRunner entity is responsible of
    re-running an experiment described by an ExperimentController
    multiple times.

    """

    def __init__(self):
        super(ExperimentRunner, self).__init__()
37 def run(self, ec, min_runs = 1, max_runs = -1, wait_time = 0,
38 wait_guids = [], compute_metric_callback = None,
39 evaluate_convergence_callback = None ):
40 """ Re-runs a same experiment multiple times
42 :param ec: Experiment description of experiment to run
43 :type name: ExperimentController
44 :rtype: EperimentController
46 :param min_runs: Minimum number of repetitions for experiment
50 :param max_runs: Maximum number of repetitions for experiment
54 :param wait_time: Time to wait in seconds between invoking
55 ec.deploy() and ec.release()
59 :param wait_guids: List of guids to pass to ec.wait_finished
60 after invoking ec.deploy()
64 :param compute_metric_callback: function to invoke after each
65 experiment run, to compute an experiment metric.
66 It will be invoked with the ec and the run count as arguments,
67 and it must return a numeric value for the computed metric:
69 metric = compute_metric_callback(ec, run)
74 :param evaluate_convergence_callback: function to evaluate whether the
75 collected metric samples have converged and the experiment runner
76 can stop. It will be invoked with the ec, the run count and the
77 list of collected metric samples as argument, and it must return
80 stop = evaluate_convergence_callback(ec, run, metrics)
82 If stop is True, then the runner will exit.
89 if (not max_runs or max_runs < 0) and not compute_metric_callback:
90 msg = "Undefined STOP condition, set stop_callback or max_runs"
91 raise RuntimeError, msg
93 if compute_metric_callback and not evaluate_convergence_callback:
94 evaluate_convergence_callback = self.evaluate_normal_convergence
95 ec.logger.info(" Treating data as normal to evaluate convergence. "
96 "Experiment will stop when the standard error with 95% "
97 "confidence interval is >= 5% of the mean of the collected samples ")
99 # Set useRunId = True in Collectors to make sure results are
100 # independently stored.
101 collectors = ec.get_resources_by_type("Collector")
102 for collector in collectors:
103 collector.set("useRunId", True)
105 dirpath = tempfile.mkdtemp()
106 filepath = ec.save(dirpath)
113 ec = self.run_experiment(filepath, wait_time, wait_guids)
115 ec.logger.info(" RUN %d \n" % run)
117 if run >= min_runs and max_runs > -1 and run >= max_runs :
120 if compute_metric_callback:
121 metric = compute_metric_callback(ec, run)
122 if metric is not None:
123 samples.append(metric)
125 if run >= min_runs and evaluate_convergence_callback:
126 if evaluate_convergence_callback(ec, run, samples):
132 def evaluate_normal_convergence(self, ec, run, samples):
133 if len(samples) == 0:
134 msg = "0 samples collected"
135 raise RuntimeError, msg
137 x = numpy.array(samples)
140 se = std / math.sqrt(n)
144 ec.logger.info(" RUN %d - SAMPLES %d MEAN %.2f STD %.2f SE95%% %.2f \n" % (
145 run, n, m, std, se95 ) )
147 return m * 0.05 >= se95
149 def run_experiment(self, filepath, wait_time, wait_guids):
150 ec = ExperimentController.load(filepath)
154 ec.wait_finished(wait_guids)
155 time.sleep(wait_time)