diff --git a/src/nepi/execution/runner.py b/src/nepi/execution/runner.py
index 6757d7b5..75e766f4 100644
--- a/src/nepi/execution/runner.py
+++ b/src/nepi/execution/runner.py
@@ -40,36 +40,30 @@ class ExperimentRunner(object):
         """ Re-runs the same experiment multiple times
 
         :param ec: Experiment controller of the experiment to run
-        :type name: ExperimentController
-        :rtype: EperimentController
+        :type ec: ExperimentController
 
         :param min_runs: Minimum number of repetitions for experiment
-        :type name: int
-        :rtype: int
+        :type min_runs: int
 
         :param max_runs: Maximum number of repetitions for experiment
-        :type name: int
-        :rtype: int
+        :type max_runs: int
 
         :param wait_time: Time to wait in seconds between invoking
             ec.deploy() and ec.release()
-        :type name: float
-        :rtype: float
+        :type wait_time: float
 
         :param wait_guids: List of guids to pass to ec.wait_finished
             after invoking ec.deploy()
-        :type name: list
-        :rtype: list of int
+        :type wait_guids: list
 
         :param compute_metric_callback: function to invoke after each
            experiment run, to compute an experiment metric.
            It will be invoked with the ec and the run count as arguments,
-           and it must return the value of the computed metric:
+           and it must return a numeric value for the computed metric:
 
                 metric = compute_metric_callback(ec, run)
 
-        :type name: function
-        :rtype: function
+        :type compute_metric_callback: function
 
         :param evaluate_convergence_callback: function to evaluate whether the
            collected metric samples have converged and the experiment runner
@@ -81,8 +75,7 @@ class ExperimentRunner(object):
 
            If stop is True, then the runner will exit.
 
-        :type name: function
-        :rtype: function
+        :type evaluate_convergence_callback: function
 
         """
 
@@ -96,11 +89,8 @@ class ExperimentRunner(object):
                     "Experiment will stop when the standard error with 95% "
                     "confidence interval is <= 5% of the mean of the collected samples ")
 
-        # Set useRunId = True in Collectors to make sure results are
-        # independently stored.
-        collectors = ec.get_resources_by_type("Collector")
-        for collector in collectors:
-            collector.set("useRunId", True)
+        # Force persistence of experiment controller
+        ec._persist = True
 
         dirpath = tempfile.mkdtemp()
         filepath = ec.save(dirpath)
@@ -119,11 +109,12 @@ class ExperimentRunner(object):
             if compute_metric_callback:
                 metric = compute_metric_callback(ec, run)
-                samples.append(metric)
+                if metric is not None:
+                    samples.append(metric)
 
-                if run >= min_runs and evaluate_convergence_callback:
-                    if evaluate_convergence_callback(ec, run, samples):
-                        break
+            if run >= min_runs and evaluate_convergence_callback:
+                if evaluate_convergence_callback(ec, run, samples):
+                    break
 
             del ec
 
         return run
@@ -132,7 +123,7 @@ class ExperimentRunner(object):
         if len(samples) == 0:
             msg = "0 samples collected"
             raise RuntimeError, msg
-        
+
         x = numpy.array(samples)
         n = len(samples)
         std = x.std()
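
For reference, a minimal usage sketch of the API documented in the docstring above. It is illustrative only: the diff does not show the method signature itself, so the method name run(), the ExperimentRunner() constructor, the import paths, and both callback bodies are assumptions drawn from the documented parameter list, not code from this commit.

    # Hypothetical usage sketch of ExperimentRunner (names assumed, see note above).
    from nepi.execution.ec import ExperimentController
    from nepi.execution.runner import ExperimentRunner

    ec = ExperimentController(exp_id = "runner-demo")
    # ... register and configure resources on ec here ...

    def compute_metric(ec, run):
        # Must return a numeric value for the run; returning None skips
        # the sample, matching the "if metric is not None" guard added
        # by this commit.
        return 1.0

    def evaluate_convergence(ec, run, samples):
        # Invoked as: stop = evaluate_convergence_callback(ec, run, samples).
        # Trivial stand-in for a real statistical test: stop after 10 samples.
        return len(samples) >= 10

    runner = ExperimentRunner()
    runs = runner.run(ec, min_runs = 5, max_runs = 20,
            wait_time = 0, wait_guids = [],
            compute_metric_callback = compute_metric,
            evaluate_convergence_callback = evaluate_convergence)

Note that the runner serializes the controller (ec.save) and re-runs the experiment from that saved description, so the ec passed in must be fully configured before calling run().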
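
The last hunk cuts off just before the convergence test itself. The sketch below shows how the computation plausibly continues from the visible lines (x, n, std), assuming the "standard error with 95% confidence interval" in the log message means twice the standard error of the mean; everything past std = x.std() is an assumption, not code from this commit.

    import math
    import numpy

    def evaluate_normal_convergence(samples):
        # Visible in the hunk above:
        if len(samples) == 0:
            raise RuntimeError("0 samples collected")

        x = numpy.array(samples)
        n = len(samples)
        std = x.std()

        # Assumed continuation: treat the samples as normally distributed
        # and report convergence once the ~95% confidence half-interval
        # (2 standard errors of the mean) is within 5% of the sample mean.
        se = std / math.sqrt(n)
        se95 = se * 2
        return x.mean() * 0.05 >= se95

Under this rule, noisier metrics simply require more runs before the half-interval shrinks enough for the runner to exit.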