diff --git a/execution/README.md b/execution/README.md
index 7db6164ece2927168ce55b81eadd842434b786df..e2d9cdc84b9193df804c36ae543eafac906c2df5 100644
--- a/execution/README.md
+++ b/execution/README.md
@@ -158,9 +158,23 @@ The `./theodolite.sh` is the entrypoint for all benchmark executions. It has to
 * `<duration>`: Duration in minutes for which subexperiments should be executed. Optional. Default `5`.
 * `<strategy>`: The benchmarking strategy. Can be set to `default` or `step`. For more details see Section _Benchmarking Strategies_. Default `default`.
 
-### <a name="benchmarking-strategies"></a>Benchmarking Strategies
+### Benchmarking Strategies
 The following benchmarking strategies are available:
 
-* `default`: Execute a subexperiment for each combination of the number of replicas (N), and the number of workload intensities (M). The amount of executed subexperiments is N*M.
-* `step`: After each subexperiment, it is checked, whether the application could handle the workload. If the workload could be handled, the workload intensity is increased in the next subexperiment. Otherwise, the workload intensity is kept the same and the number of instances is increased. The amount of executed subexperiments is at most N+M-1.
+* `default`: Execute a subexperiment for each combination of the number of replicas (N) and the number of workload intensities (M). The number of executed subexperiments is always N*M.
+* `step`: A heuristic that works as follows: after each subexperiment, it is checked whether the application could handle the workload. If the workload could be handled, the workload intensity is increased for the next subexperiment; otherwise, the intensity is kept the same and the number of instances is increased. The number of executed subexperiments is at most N+M-1 (see the sketch below).
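+
+As an illustration, the `step` heuristic can be sketched in Python as follows. This is only a simplified sketch, not the actual implementation in `execution/strategies`; `run_subexperiment` stands in for a hypothetical callback that runs one subexperiment and returns `1` if the workload could be handled:
+
+```python
+def step_strategy(intensities, replica_counts, run_subexperiment):
+    i, r = 0, 0  # indices into the M intensities and N replica counts
+    while i < len(intensities) and r < len(replica_counts):
+        if run_subexperiment(intensities[i], replica_counts[r]) == 1:
+            i += 1  # workload handled: try the next higher intensity
+        else:
+            r += 1  # workload not handled: keep intensity, add instances
+```
+
+Since every subexperiment advances exactly one of the two indices, at most (N-1)+(M-1)+1 = N+M-1 subexperiments are executed, which matches the bound stated above.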
 
diff --git a/execution/lag_analysis.py b/execution/lag_analysis.py
index 814f8d105fb8a56305e07d6c06a969f3dc2e1738..84149151366ca674b27ef5655981e4fc9e3635c9 100644
--- a/execution/lag_analysis.py
+++ b/execution/lag_analysis.py
@@ -13,7 +13,7 @@ instances = sys.argv[4]
 execution_minutes = int(sys.argv[5])
 time_diff_ms = int(os.getenv('CLOCK_DIFF_MS', 0))
 
-prometheus_query_path = 'http://kube1.se.internal:32529/api/v1/query_range'
+prometheus_query_path = 'http://localhost:9090/api/v1/query_range'
 
 #http://localhost:9090/api/v1/query_range?query=sum%20by(job,topic)(kafka_consumer_consumer_fetch_manager_metrics_records_lag)&start=2015-07-01T20:10:30.781Z&end=2020-07-01T20:11:00.781Z&step=15s
 
@@ -51,6 +51,14 @@ for result in results:
 
 df = pd.DataFrame(d)
 
+# Save whether the subexperiment was successful, i.e., whether the average consumer lag of the 'input' topic stayed below a threshold of 1000 records.
+# Assumption: Due to fluctuations within the record lag measurements, it is sufficient to analyze the second half of the measurements.
+input_lag = [x['value'] for x in d if x['topic'] == 'input']
+second_half = input_lag[len(input_lag)//2:]
+avg_lag = sum(second_half) / len(second_half)
+with open("last_exp_result.txt", "w") as file:
+    file.write(str(0 if avg_lag > 1000 else 1))  # 1 = success, 0 = failure
+
 # Do some analysis
 
 input = df.loc[df['topic'] == "input"]
@@ -109,14 +117,6 @@ df = pd.DataFrame(d)
 
 df.to_csv(f"{filename}_totallag.csv")
 
-# save whether the subexperiment was successful or not, meaning whether the consumer lag was above some threshhold or not
-# Assumption: Due to fluctuations within the record lag measurements, it is sufficient to analyze the second half of measurements.
-second_half = list(map(lambda x: x['value'], d[len(d)//2:]))
-avg_lag = sum(second_half) / len(second_half)
-with open(r"last_exp_result.txt", "w+") as file:
-    success = 0 if avg_lag > 1000 else 1
-    file.write(str(success))
-
 # Load partition count
 
 response = requests.get(prometheus_query_path, params={
diff --git a/execution/strategies/tests/test_step_strategy.py b/execution/strategies/tests/test_step_strategy.py
index 2826d218bb911d7e5821f0d55a99073cd080fe13..bd7a3caef77d671735c74cecd3d2b7c2be056d2b 100644
--- a/execution/strategies/tests/test_step_strategy.py
+++ b/execution/strategies/tests/test_step_strategy.py
@@ -68,13 +68,11 @@ def subexperiment_evaluator_execute():
     pp.pprint(expected_order[experiment_counter])
     assert expected_order[experiment_counter] == last_experiment
     print("Index was as expected. Evaluation finished.")
-    return successful[last_experiment[0]][last_experiment[1]]
+    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
 
 subexperiment_evaluator.execute = subexperiment_evaluator_execute
 
 def test_step_strategy():
-    print("strt test")
-    assert True
     # declare parameters
     uc="test-uc"
     partitions=40