mirror of https://github.com/microsoft/autogen.git (synced 2025-09-10 00:36:06 +00:00)
parent da2cd7ca89
commit b78d0b57c4
@@ -492,9 +492,7 @@ def run(
                 SearchAlgorithm = BlendSearch
                 logger.info(
-                    "Using search algorithm {}.".format(
-                        SearchAlgorithm.__class__.__name__
-                    )
+                    "Using search algorithm {}.".format(SearchAlgorithm.__name__)
                 )
             except ImportError:
                 SearchAlgorithm = CFO
@@ -504,9 +502,7 @@ def run(
             metric = metric or DEFAULT_METRIC
         else:
             SearchAlgorithm = CFO
-            logger.info(
-                "Using search algorithm {}.".format(SearchAlgorithm.__class__.__name__)
-            )
+            logger.info("Using search algorithm {}.".format(SearchAlgorithm.__name__))
             metric = lexico_objectives["metrics"][0] or DEFAULT_METRIC
         search_alg = SearchAlgorithm(
             metric=metric,
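The two hunks above change the log call to format SearchAlgorithm.__name__ instead of SearchAlgorithm.__class__.__name__. Because SearchAlgorithm is bound to the class itself (BlendSearch or CFO), not an instance, __class__ resolves to the metaclass, so the old message printed the metaclass name rather than the searcher's name. A minimal sketch with a placeholder class (not the actual FLAML searcher) showing the difference:

# Illustration only: a placeholder class standing in for BlendSearch/CFO.
class BlendSearch:
    pass


SearchAlgorithm = BlendSearch  # the variable holds the class object, not an instance

print(SearchAlgorithm.__class__.__name__)  # "type" -- the metaclass name, misleading in a log
print(SearchAlgorithm.__name__)            # "BlendSearch" -- the name the log message intends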
@@ -675,14 +671,14 @@ def run(
         num_trials = 0
         if time_budget_s is None:
             time_budget_s = np.inf
-        fail = 0
-        ub = (
+        num_failures = 0
+        upperbound_num_failures = (
             len(evaluated_rewards) if evaluated_rewards else 0
         ) + max_failure
         while (
             time.time() - time_start < time_budget_s
             and (num_samples < 0 or num_trials < num_samples)
-            and fail < ub
+            and num_failures < upperbound_num_failures
         ):
             while len(_runner.running_trials) < n_concurrent_trials:
                 # suggest trials for spark
@@ -690,9 +686,9 @@ def run(
                 if trial_next:
                     num_trials += 1
                 else:
-                    fail += 1  # break with ub consecutive failures
-                    logger.debug(f"consecutive failures is {fail}")
-                    if fail >= ub:
+                    num_failures += 1  # break with upperbound_num_failures consecutive failures
+                    logger.debug(f"consecutive failures is {num_failures}")
+                    if num_failures >= upperbound_num_failures:
                         break
             trials_to_run = _runner.running_trials
             if not trials_to_run:
@@ -730,7 +726,7 @@ def run(
                         )
                         report(_metric=result)
                 _runner.stop_trial(trial_to_run)
-                fail = 0
+                num_failures = 0
         analysis = ExperimentAnalysis(
             _runner.get_trials(),
             metric=metric,
@@ -766,12 +762,14 @@ def run(
         num_trials = 0
         if time_budget_s is None:
             time_budget_s = np.inf
-        fail = 0
-        ub = (len(evaluated_rewards) if evaluated_rewards else 0) + max_failure
+        num_failures = 0
+        upperbound_num_failures = (
+            len(evaluated_rewards) if evaluated_rewards else 0
+        ) + max_failure
         while (
             time.time() - time_start < time_budget_s
             and (num_samples < 0 or num_trials < num_samples)
-            and fail < ub
+            and num_failures < upperbound_num_failures
         ):
             trial_to_run = _runner.step()
             if trial_to_run:
@@ -789,10 +787,11 @@ def run(
                     else:
                         report(_metric=result)
                 _runner.stop_trial(trial_to_run)
-                fail = 0
+                num_failures = 0
             else:
-                fail += 1  # break with ub consecutive failures
-                if fail == ub:
+                # break with upperbound_num_failures consecutive failures
+                num_failures += 1
+                if num_failures == upperbound_num_failures:
                     logger.warning(
                         f"fail to sample a trial for {max_failure} times in a row, stopping."
                     )
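Apart from renaming fail/ub to num_failures/upperbound_num_failures, these hunks keep the control flow intact: each failed attempt to get a trial increments the counter, a successfully finished trial resets it to zero, and the loop stops once the counter reaches (number of pre-evaluated rewards, if any) + max_failure consecutive failures. A self-contained sketch of that stopping rule, using a hypothetical suggest_trial callable in place of the real trial runner:

import time


def tuning_loop_sketch(
    suggest_trial,            # hypothetical: returns a trial on success, None on failure
    max_failure=100,
    evaluated_rewards=None,
    time_budget_s=None,
    num_samples=-1,
):
    # Same stopping rule as the loops above: exit after
    # upperbound_num_failures consecutive failed suggestions.
    time_start = time.time()
    if time_budget_s is None:
        time_budget_s = float("inf")
    num_trials = 0
    num_failures = 0
    upperbound_num_failures = (len(evaluated_rewards) if evaluated_rewards else 0) + max_failure
    while (
        time.time() - time_start < time_budget_s
        and (num_samples < 0 or num_trials < num_samples)
        and num_failures < upperbound_num_failures
    ):
        trial = suggest_trial()
        if trial:
            num_trials += 1
            num_failures = 0   # a successful trial resets the consecutive-failure counter
        else:
            num_failures += 1  # break after upperbound_num_failures failures in a row
    return num_trials

For example, tuning_loop_sketch(lambda: None, max_failure=3) returns 0 after three consecutive failed suggestions, mirroring the early-exit behavior in the diff above.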