""" Re-runs a same experiment multiple times
:param ec: Experiment description of experiment to run
- :type name: ExperimentController
- :rtype: EperimentController
+ :type ec: ExperimentController
:param min_runs: Minimum number of repetitions for experiment
- :type name: int
- :rtype: int
+ :type min_runs: int
:param max_runs: Maximum number of repetitions for experiment
- :type name: int
- :rtype: int
+ :type max_runs: int
:param wait_time: Time to wait in seconds between invoking
ec.deploy() and ec.release()
- :type name: float
- :rtype: float
+ :type wait_time: float
:param wait_guids: List of guids to pass to ec.wait_finished
after invoking ec.deploy()
- :type name: list
- :rtype: list of int
+ :type wait_guids: list
:param compute_metric_callback: function to invoke after each
experiment run, to compute an experiment metric.
It will be invoked with the ec and the run count as arguments,
- and it must return the value of the computed metric:
+ and it must return a numeric value for the computed metric:
metric = compute_metric_callback(ec, run)
- :type name: function
- :rtype: function
+ :type compute_metric_callback: function
:param evaluate_convergence_callback: function to evaluate whether the
collected metric samples have converged and whether the experiment
runner should stop. If stop is True, then the runner will exit.
- :type name: function
- :rtype: function
+ :type evaluate_convergence_callback: function
"""
"Experiment will stop when the standard error with 95% "
"confidence interval is >= 5% of the mean of the collected samples ")
- # Set useRunId = True in Collectors to make sure results are
- # independently stored.
- collectors = ec.get_resources_by_type("Collector")
- for collector in collectors:
- collector.set("useRunId", True)
+ # Force persistence of experiment controller
+ ec._persist = True
dirpath = tempfile.mkdtemp()
filepath = ec.save(dirpath)
if compute_metric_callback:
metric = compute_metric_callback(ec, run)
- samples.append(metric)
+ if metric is not None:
+ samples.append(metric)
- if run >= min_runs and evaluate_convergence_callback:
- if evaluate_convergence_callback(ec, run, samples):
- break
+ if run >= min_runs and evaluate_convergence_callback:
+ if evaluate_convergence_callback(ec, run, samples):
+ break
del ec
return run
if len(samples) == 0:
msg = "0 samples collected"
raise RuntimeError, msg
-
+
x = numpy.array(samples)
n = len(samples)
std = x.std()