diff --git a/execution/strategies/__init__.py b/execution/strategies/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/execution/strategies/config.py b/execution/strategies/config.py
index f24f2ed9bfbb5a053e72019f522b5d057548a497..a28c3b856c7c99399f7924b169be146f971446f3 100644
--- a/execution/strategies/config.py
+++ b/execution/strategies/config.py
@@ -12,4 +12,5 @@ class ExperimentConfig:
kafka_streams_commit_interval_ms: int
execution_minutes: int
benchmarking_strategy: object
- subexperiment_executor: object
\ No newline at end of file
+ subexperiment_executor: object
+ subexperiment_evaluator: object
\ No newline at end of file
diff --git a/execution/strategies/strategies/config.py b/execution/strategies/strategies/config.py
index 92dfa919018169457e64c7d97b0f1eb1f0867d36..4a633c1135b2110f76f2dbd0b916e61483c96940 100644
--- a/execution/strategies/strategies/config.py
+++ b/execution/strategies/strategies/config.py
@@ -11,5 +11,4 @@ class SubexperimentConfig:
cpu_limit: str
memory_limit: str
kafka_streams_commit_interval_ms: int
- execution_minutes: int
- subexperiment_executor: object
\ No newline at end of file
+ execution_minutes: int
\ No newline at end of file
diff --git a/execution/strategies/strategies/default_strategy.py b/execution/strategies/strategies/default_strategy.py
index 93c17c58a5f335824748ae86dfbeb6049696575e..41419468fe873aea2308c564e83b3cca71135afe 100644
--- a/execution/strategies/strategies/default_strategy.py
+++ b/execution/strategies/strategies/default_strategy.py
@@ -11,6 +11,6 @@ def execute(config):
subexperiment_counter+=1
print(f"Run subexperiment {subexperiment_counter}/{subexperiments_total} with dimension value {dim_value} and {replica} replicas.")
- subexperiment_config = SubexperimentConfig(config.use_case, subexperiment_counter, dim_value, replica, config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes, config.subexperiment_executor)
+ subexperiment_config = SubexperimentConfig(config.use_case, subexperiment_counter, dim_value, replica, config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes)
config.subexperiment_executor.execute(subexperiment_config)
diff --git a/execution/strategies/strategies/step_strategy.py b/execution/strategies/strategies/step_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0e1225540977410523675fd17c61fecc3c92a18
--- /dev/null
+++ b/execution/strategies/strategies/step_strategy.py
@@ -0,0 +1,25 @@
+# Contains the step strategy: increases the dimension value while subexperiments succeed and scales up the number of replicas when one fails.
+
+
+from .config import SubexperimentConfig
+
+
+def execute(config):
+ subexperiment_counter=0
+ subexperiments_total=len(config.dim_values)*len(config.replicas)
+ i=0
+ j=0
+ while i in range(len(config.dim_values)):
+ while j in range(len(config.replicas)):
+ subexperiment_counter+=1
+ print(f"Run subexperiment {subexperiment_counter}/{subexperiments_total} with dimension value {config.dim_values[i]} and {config.replicas[j]} replicas.")
+
+ subexperiment_config = SubexperimentConfig(config.use_case, subexperiment_counter, config.dim_values[i], config.replicas[j], config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes)
+
+ config.subexperiment_executor.execute(subexperiment_config)
+ result = config.subexperiment_evaluator.execute()
+ if result:
+ i+=1
+ else:
+ j+=1
+ i+=1
\ No newline at end of file
diff --git a/execution/strategies/tests/__init__.py b/execution/strategies/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/execution/strategies/tests/test_step_strategy.py b/execution/strategies/tests/test_step_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..2826d218bb911d7e5821f0d55a99073cd080fe13
--- /dev/null
+++ b/execution/strategies/tests/test_step_strategy.py
@@ -0,0 +1,89 @@
+import pprint
+
+from strategies.config import ExperimentConfig
+import strategies.strategies.step_strategy as step_strategy
+from strategies.experiment_execution import ExperimentExecutor
+import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
+
+class Object(object):
+ pass
+
+pp = pprint.PrettyPrinter(indent=4)
+
+dim_values = [0, 1, 2, 3, 4, 5, 6]
+replicas = [0, 1, 2, 3, 4, 5, 6]
+
+# True means the experiment was successful
+# the experiments are indexed row (representing dimension values) and column (representing number of replicas) wise as usual arrays from 0 - 6 respectively.
+# this means the first row starts with (0,0), the second row with (1, 0) etc.
+successful = [
+ [ True , True , True , True , True , True , True ],
+ [ False, False, True , True , True , True , True ],
+ [ False, False, True , True , True , True , True ],
+ [ False, False, False, True , True , True , True ],
+ [ False, False, False, False, True , True , True ],
+ [ False, False, False, False, False, False, True ],
+ [ False, False, False, False, False, False, False ]
+ ]
+
+# the expected order of executed experiments
+expected_order = [
+ (0,0),
+ (1,0),
+ (1,1),
+ (1,2),
+ (2,2),
+ (3,2),
+ (3,3),
+ (4,3),
+ (4,4),
+ (5,4),
+ (5,5),
+ (5,6),
+ (6,6)
+ ]
+
+last_experiment = (0, 0)
+experiment_counter = -1
+subexperiment_executor = Object()
+
+def subexperiment_executor_executor(config):
+ global experiment_counter, last_experiment, pp
+ print("Simulate subexperiment with config:")
+ pp.pprint(config)
+ last_experiment = (config.dim_value, config.replicas)
+ experiment_counter += 1
+ print("Simulation complete")
+
+subexperiment_executor.execute = subexperiment_executor_executor
+
+
+# returns True if the experiment was successful
+
+subexperiment_evaluator = Object()
+
+def subexperiment_evaluator_execute():
+ print("Evaluating last experiment. Index was:")
+ global expected_order, experiment_counter, last_experiment, successful
+ pp.pprint(expected_order[experiment_counter])
+ assert expected_order[experiment_counter] == last_experiment
+ print("Index was as expected. Evaluation finished.")
+ return successful[last_experiment[0]][last_experiment[1]]
+
+subexperiment_evaluator.execute = subexperiment_evaluator_execute
+
+def test_step_strategy():
+ print("start test")
+ assert True
+ # declare parameters
+ uc="test-uc"
+ partitions=40
+ cpu_limit="1000m"
+ memory_limit="4Gi"
+ kafka_streams_commit_interval_ms=100
+ execution_minutes=5
+
+ # execute
+ experiment_config = ExperimentConfig(uc, dim_values, replicas, partitions, cpu_limit, memory_limit, kafka_streams_commit_interval_ms, execution_minutes, step_strategy, subexperiment_executor, subexperiment_evaluator)
+ executor = ExperimentExecutor(experiment_config)
+ executor.execute()
\ No newline at end of file
diff --git a/execution/theodolite.py b/execution/theodolite.py
index d7b3d940ab9ad2975998571c07ffccc57dfa5eea..ce1c238101b86b02c2334838302969f45d404f0b 100755
--- a/execution/theodolite.py
+++ b/execution/theodolite.py
@@ -20,6 +20,9 @@ benchmark_strategy=sys.argv[9] if len(sys.argv) >= 10 and sys.argv[9] else "defa
print("Chosen benchmarking strategy: "+benchmark_strategy)
print("Going to execute " + str(len(dim_values)*len(replicas)) + " subexperiments in total..")
+# The default strategy never invokes the evaluator (see default_strategy.execute), so pass None until a noop evaluator is added.
-experiment_config = ExperimentConfig(uc, dim_values, replicas, partitions, cpu_limit, memory_limit, kafka_streams_commit_interval_ms, execution_minutes, default_strategy, subexperiment_executor)
+experiment_config = ExperimentConfig(uc, dim_values, replicas, partitions, cpu_limit, memory_limit, kafka_streams_commit_interval_ms, execution_minutes, default_strategy, subexperiment_executor, None)
executor = ExperimentExecutor(experiment_config)
executor.execute()
+
+# todo add step strategy