from .config import SubexperimentConfig


def _run_experiment(config, replica_index, dim_index, subexperiment_counter):
    """Run one subexperiment for the given dim-value/replica indices.

    Prints the run banner, executes the subexperiment and returns the
    evaluator's verdict (1 = successful, 0 = not successful).
    """
    print(f"Run subexperiment {subexperiment_counter} with config {config.dim_values[dim_index]} {config.replicas[replica_index]}")
    subexperiment_config = SubexperimentConfig(
        config.use_case,
        subexperiment_counter,
        config.dim_values[dim_index],
        config.replicas[replica_index],
        config.partitions,
        config.cpu_limit,
        config.memory_limit,
        config.kafka_streams_commit_interval_ms,
        config.execution_minutes)
    config.subexperiment_executor.execute(subexperiment_config)
    return config.subexperiment_evaluator.execute()


def searchTransition(config, replica_index, lower, upper, subexperiment_counter):
    """Binary-search the dim-value interval [lower, upper] for the transition.

    For a fixed replica count (replica_index) the experiments are assumed to
    be monotone: successful up to some dimension value, unsuccessful beyond
    it. Returns the index of the first *unsuccessful* dimension value, i.e.
    one past the last successful one (may be upper+1 if all succeed).
    """
    if lower == upper:
        # Single candidate left: its outcome alone decides the transition.
        if _run_experiment(config, replica_index, lower, subexperiment_counter) == 1:
            return lower + 1  # successful, so the upper neighbor must fail
        return lower
    elif lower + 1 == upper:
        # Two adjacent candidates: test the lower one first, and only if it
        # succeeds also test the upper one.
        # NOTE(review): both runs are labeled with the same
        # subexperiment_counter; kept for behavioral parity.
        if _run_experiment(config, replica_index, lower, subexperiment_counter) == 1:
            if _run_experiment(config, replica_index, upper, subexperiment_counter) == 1:
                return upper + 1
            return upper
        return lower
    else:
        # More than two candidates: probe the middle and recurse into the
        # half that must contain the transition.
        mid = (upper + lower) // 2
        if _run_experiment(config, replica_index, mid, subexperiment_counter) == 1:
            # success -> transition lies in (mid+1, upper)
            return searchTransition(config, replica_index, mid + 1, upper, subexperiment_counter + 1)
        # failure -> transition lies in (lower, mid-1)
        return searchTransition(config, replica_index, lower, mid - 1, subexperiment_counter + 1)


def execute(config):
    """Search the capacity transition for each replica count in turn.

    Because the transition index is monotone in the replica count, the search
    for each subsequent replica count resumes at the transition found for the
    previous one, and stops early once every dimension value succeeds.
    """
    # NOTE(review): subexperiment_counter is never advanced between replica
    # counts, so the printed run labels restart at 1 for each replica count;
    # kept for behavioral parity.
    subexperiment_counter = 0
    lower = 0
    upper = len(config.dim_values) - 1
    for replica_index in range(len(config.replicas)):
        if lower >= len(config.dim_values):
            break  # all dimension values already succeed; nothing left to test
        lower = searchTransition(config, replica_index, lower, upper, subexperiment_counter + 1)
# Outcome matrix of the simulated experiments: rows are dimension-value
# indices, columns are replica-count indices. True marks a successful run,
# so each column is successful up to some row and unsuccessful below it.
successful = [
    [True,  True,  True,  True,  True,  True,  True],
    [False, False, True,  True,  True,  True,  True],
    [False, False, True,  True,  True,  True,  True],
    [False, False, False, True,  True,  True,  True],
    [False, False, False, False, True,  True,  True],
    [False, False, False, False, False, False, True],
    [False, False, False, False, False, False, False],
]

# The (dim_value, replicas) pairs the strategy is expected to probe, in order.
expected_order = [
    (3, 0),  # interval (0, 6)
    (1, 0),
    (0, 0),
    (3, 1),  # interval (0, 6)
    (1, 1),
    (3, 2),  # interval (0, 6)
    (1, 2),
    (2, 2),
    (4, 3),  # interval (3, 6)
    (3, 3),
    (5, 4),  # interval (4, 6)
    (4, 4),
    (5, 5),  # interval (5, 6)
    (5, 6),  # interval (5, 6)
    (6, 6),
]

# Shared state between the executor stub (which records each simulated run)
# and the evaluator stub (which checks it against expected_order).
last_experiment = (0, 0)
experiment_counter = -1  # index into expected_order; bumped per simulated run

subexperiment_executor = Object()


def _record_subexperiment(config):
    """Executor stub: record the probed (dim_value, replicas) pair."""
    global experiment_counter, last_experiment
    print("Simulate subexperiment with config:")
    pp.pprint(config)
    last_experiment = (config.dim_value, config.replicas)
    experiment_counter += 1
    print("Simulation complete")


subexperiment_executor.execute = _record_subexperiment


subexperiment_evaluator = Object()


def _evaluate_subexperiment():
    """Evaluator stub: assert the probe order and return 1 on success, 0 otherwise."""
    print("Evaluating last experiment. Index was:")
    pp.pprint(last_experiment)
    print("Index was expected to be:")
    pp.pprint(expected_order[experiment_counter])
    assert expected_order[experiment_counter] == last_experiment
    print("Index was as expected. Evaluation finished.")
    dim_index, replica_index = last_experiment
    return int(successful[dim_index][replica_index])


subexperiment_evaluator.execute = _evaluate_subexperiment


def test_binary_search_strategy():
    """Drive the binary-search strategy against the simulated outcome matrix."""
    # declare parameters
    uc = "test-uc"
    partitions = 40
    cpu_limit = "1000m"
    memory_limit = "4Gi"
    kafka_streams_commit_interval_ms = 100
    execution_minutes = 5

    # execute
    experiment_config = ExperimentConfig(
        uc, dim_values, replicas, partitions, cpu_limit, memory_limit,
        kafka_streams_commit_interval_ms, execution_minutes,
        binary_search_strategy, subexperiment_executor, subexperiment_evaluator)
    executor = ExperimentExecutor(experiment_config)
    executor.execute()
print(f"Going to execute at most {len(dim_values)+len(replicas)-1} subexperiments in total..") experiment_config = ExperimentConfig(uc, dim_values, replicas, partitions, cpu_limit, memory_limit, kafka_streams_commit_interval_ms, execution_minutes, step_strategy, subexperiment_executor, subexperiment_evaluator) +elif benchmark_strategy == "binary-search": + experiment_config = ExperimentConfig(uc, dim_values, replicas, partitions, cpu_limit, memory_limit, kafka_streams_commit_interval_ms, execution_minutes, binary_search_strategy, subexperiment_executor, subexperiment_evaluator) else: print(f"Going to execute {len(dim_values)*len(replicas)} subexperiments in total..") experiment_config = ExperimentConfig(uc, dim_values, replicas, partitions, cpu_limit, memory_limit, kafka_streams_commit_interval_ms, execution_minutes, default_strategy, subexperiment_executor, noop_subexperiment_evaluator) -- GitLab