git://git.onelab.eu
/
nepi.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Committing improvements to Collector. Local_dir added to ExperimentController
[nepi.git]
/
src
/
nepi
/
execution
/
runner.py
diff --git
a/src/nepi/execution/runner.py
b/src/nepi/execution/runner.py
index
6757d7b
..
75e766f
100644
(file)
--- a/
src/nepi/execution/runner.py
+++ b/
src/nepi/execution/runner.py
@@
-40,36
+40,30
@@
class ExperimentRunner(object):
""" Re-runs a same experiment multiple times
:param ec: Experiment description of experiment to run
""" Re-runs a same experiment multiple times
:param ec: Experiment description of experiment to run
- :type name: ExperimentController
- :rtype: EperimentController
+ :type ec: ExperimentController
:param min_runs: Minimum number of repetitions for experiment
:param min_runs: Minimum number of repetitions for experiment
- :type name: int
- :rtype: int
+ :type min_runs: int
:param max_runs: Maximum number of repetitions for experiment
:param max_runs: Maximum number of repetitions for experiment
- :type name: int
- :rtype: int
+ :type max_runs: int
:param wait_time: Time to wait in seconds between invoking
ec.deploy() and ec.release()
:param wait_time: Time to wait in seconds between invoking
ec.deploy() and ec.release()
- :type name: float
- :rtype: float
+ :type wait_time: float
:param wait_guids: List of guids to pass to ec.wait_finished
after invoking ec.deploy()
:param wait_guids: List of guids to pass to ec.wait_finished
after invoking ec.deploy()
- :type name: list
- :rtype: list of int
+ :type wait_guids: list
:param compute_metric_callback: function to invoke after each
experiment run, to compute an experiment metric.
It will be invoked with the ec and the run count as arguments,
:param compute_metric_callback: function to invoke after each
experiment run, to compute an experiment metric.
It will be invoked with the ec and the run count as arguments,
- and it must return
the value of
the computed metric:
+ and it must return
a numeric value for
the computed metric:
metric = compute_metric_callback(ec, run)
metric = compute_metric_callback(ec, run)
- :type name: function
- :rtype: function
+ :type compute_metric_callback: function
:param evaluate_convergence_callback: function to evaluate whether the
collected metric samples have converged and the experiment runner
:param evaluate_convergence_callback: function to evaluate whether the
collected metric samples have converged and the experiment runner
@@
-81,8
+75,7
@@
class ExperimentRunner(object):
If stop is True, then the runner will exit.
If stop is True, then the runner will exit.
- :type name: function
- :rtype: function
+ :type evaluate_convergence_callback: function
"""
"""
@@
-96,11
+89,8
@@
class ExperimentRunner(object):
"Experiment will stop when the standard error with 95% "
"confidence interval is >= 5% of the mean of the collected samples ")
"Experiment will stop when the standard error with 95% "
"confidence interval is >= 5% of the mean of the collected samples ")
- # Set useRunId = True in Collectors to make sure results are
- # independently stored.
- collectors = ec.get_resources_by_type("Collector")
- for collector in collectors:
- collector.set("useRunId", True)
+ # Force persistence of experiment controller
+ ec._persist = True
dirpath = tempfile.mkdtemp()
filepath = ec.save(dirpath)
dirpath = tempfile.mkdtemp()
filepath = ec.save(dirpath)
@@
-119,11
+109,12
@@
class ExperimentRunner(object):
if compute_metric_callback:
metric = compute_metric_callback(ec, run)
if compute_metric_callback:
metric = compute_metric_callback(ec, run)
- samples.append(metric)
+ if metric is not None:
+ samples.append(metric)
- if run >= min_runs and evaluate_convergence_callback:
- if evaluate_convergence_callback(ec, run, samples):
- break
+
if run >= min_runs and evaluate_convergence_callback:
+
if evaluate_convergence_callback(ec, run, samples):
+
break
del ec
return run
del ec
return run
@@
-132,7
+123,7
@@
class ExperimentRunner(object):
if len(samples) == 0:
msg = "0 samples collected"
raise RuntimeError, msg
if len(samples) == 0:
msg = "0 samples collected"
raise RuntimeError, msg
-
+
x = numpy.array(samples)
n = len(samples)
std = x.std()
x = numpy.array(samples)
n = len(samples)
std = x.std()