diff --git a/execution/theodolite.py b/execution/theodolite.py
index a7e2d708ca88643e24a5115b2574ecaddf567a04..a05d58cc764b1aa44aac9044320746a6c6ab9bf6 100755
--- a/execution/theodolite.py
+++ b/execution/theodolite.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
 
+import argparse
 import logging  # logging
 import os
 import sys
@@ -14,21 +15,68 @@ import strategies.subexperiment_execution.subexperiment_executor as subexperimen
 import strategies.subexperiment_evaluation.subexperiment_evaluator as subexperiment_evaluator
 
 
-def main():
-    uc = sys.argv[1]
-    dim_values = sys.argv[2].split(',')
-    replicas = sys.argv[3].split(',')
-    partitions = sys.argv[4] if len(sys.argv) >= 5 and sys.argv[4] else 40
-    cpu_limit = sys.argv[5] if len(sys.argv) >= 6 and sys.argv[5] else "1000m"
-    memory_limit = sys.argv[6] if len(sys.argv) >= 7 and sys.argv[6] else "4Gi"
-    kafka_streams_commit_interval_ms = sys.argv[7] if len(
-        sys.argv) >= 8 and sys.argv[7] else 100
-    execution_minutes = sys.argv[8] if len(
-        sys.argv) >= 9 and sys.argv[8] else 5
-    domain_restriction = bool(sys.argv[9]) if len(
-        sys.argv) >= 10 and sys.argv[9] == "restrict-domain" else False
-    search_strategy = sys.argv[10] if len(sys.argv) >= 11 and (
-        sys.argv[10] == "linear-search" or sys.argv[10] == "binary-search") else "default"
+def load_variables():
+    """Load the CLI variables given at the command line"""
+    print('Load CLI variables')
+    parser = argparse.ArgumentParser(description='Run use case program')
+    parser.add_argument('uc',
+                        metavar='<uc>',
+                        help='use case number, one of 1, 2, 3 or 4')
+    parser.add_argument('loads',
+                        type=int,
+                        metavar='<load>',
+                        nargs='+',
+                        help='Loads that should be executed')
+    parser.add_argument('--instances', '-i',
+                        dest='instances_list',
+                        default=[1],
+                        type=int,
+                        metavar='<instance>',
+                        nargs='+',
+                        help='List of instances used in benchmarks')
+    parser.add_argument('--partitions', '-p',
+                        default=40,
+                        type=int,
+                        metavar='<partitions>',
+                        help='Number of partitions for Kafka topics')
+    parser.add_argument('--cpu-limit', '-cpu',
+                        default='1000m',
+                        metavar='<CPU limit>',
+                        help='Kubernetes CPU limit')
+    parser.add_argument('--memory-limit', '-mem',
+                        default='4Gi',
+                        metavar='<memory limit>',
+                        help='Kubernetes memory limit')
+    parser.add_argument('--commit-ms',
+                        default=100,
+                        type=int,
+                        metavar='<commit ms>',
+                        help='Kafka Streams commit interval in milliseconds')
+    parser.add_argument('--duration', '-d',
+                        default=5,
+                        type=int,
+                        metavar='<duration>',
+                        help='Duration in minutes subexperiments should be '
+                             'executed for')
+    parser.add_argument('--domain-restriction',
+                        action="store_true",
+                        help='Use domain restriction. For details see README')
+    parser.add_argument('--search-strategy',
+                        default='default',
+                        metavar='<strategy>',
+                        help='The benchmarking search strategy. Can be set to default, linear-search or binary-search')
+    parser.add_argument('--reset',
+                        action="store_true",
+                        help='Resets the environment before execution')
+    parser.add_argument('--reset-only',
+                        action="store_true",
+                        help='Only resets the environment. Ignores all other parameters')
+    args = parser.parse_args()
+    print(args)
+    return args
+
+
+def main(uc, loads, instances_list, partitions, cpu_limit, memory_limit, commit_ms, duration, domain_restriction, search_strategy, reset, reset_only):
 
     print(f"Domain restriction of search space activated: {domain_restriction}")
     print(f"Chosen search strategy: {search_strategy}")
@@ -46,17 +94,17 @@ def main():
     if domain_restriction:
         # domain restriction + linear-search
         if search_strategy == "linear-search":
-            print(f"Going to execute at most {len(dim_values)+len(replicas)-1} subexperiments in total..")
+            print(f"Going to execute at most {len(loads)+len(instances_list)-1} subexperiments in total..")
             experiment_config = ExperimentConfig(
                 use_case=uc,
                 exp_id=exp_id,
-                dim_values=dim_values,
-                replicass=replicas,
+                dim_values=loads,
+                replicass=instances_list,
                 partitions=partitions,
                 cpu_limit=cpu_limit,
                 memory_limit=memory_limit,
-                kafka_streams_commit_interval_ms=kafka_streams_commit_interval_ms,
-                execution_minutes=execution_minutes,
+                kafka_streams_commit_interval_ms=commit_ms,
+                execution_minutes=duration,
                 domain_restriction_strategy=lower_bound_strategy,
                 search_strategy=linear_search_strategy,
                 subexperiment_executor=subexperiment_executor,
@@ -66,30 +114,30 @@ def main():
             experiment_config = ExperimentConfig(
                 use_case=uc,
                 exp_id=exp_id,
-                dim_values=dim_values,
-                replicass=replicas,
+                dim_values=loads,
+                replicass=instances_list,
                 partitions=partitions,
                 cpu_limit=cpu_limit,
                 memory_limit=memory_limit,
-                kafka_streams_commit_interval_ms=kafka_streams_commit_interval_ms,
-                execution_minutes=execution_minutes,
+                kafka_streams_commit_interval_ms=commit_ms,
+                execution_minutes=duration,
                 domain_restriction_strategy=lower_bound_strategy,
                 search_strategy=binary_search_strategy,
                 subexperiment_executor=subexperiment_executor,
                 subexperiment_evaluator=subexperiment_evaluator)
         # domain restriction + check_all
         else:
-            print(f"Going to execute {len(dim_values)*len(replicas)} subexperiments in total..")
+            print(f"Going to execute {len(loads)*len(instances_list)} subexperiments in total..")
             experiment_config = ExperimentConfig(
                 use_case=uc,
                 exp_id=exp_id,
-                dim_values=dim_values,
-                replicass=replicas,
+                dim_values=loads,
+                replicass=instances_list,
                 partitions=partitions,
                 cpu_limit=cpu_limit,
                 memory_limit=memory_limit,
-                kafka_streams_commit_interval_ms=kafka_streams_commit_interval_ms,
-                execution_minutes=execution_minutes,
+                kafka_streams_commit_interval_ms=commit_ms,
+                execution_minutes=duration,
                 domain_restriction_strategy=lower_bound_strategy,
                 search_strategy=check_all_strategy,
                 subexperiment_executor=subexperiment_executor,
@@ -98,17 +146,17 @@ def main():
     else:
         # no domain restriction + linear-search
         if search_strategy == "linear-search":
-            print(f"Going to execute at most {len(dim_values)*len(replicas)} subexperiments in total..")
+            print(f"Going to execute at most {len(loads)*len(instances_list)} subexperiments in total..")
             experiment_config = ExperimentConfig(
                 use_case=uc,
                 exp_id=exp_id,
-                dim_values=dim_values,
-                replicass=replicas,
+                dim_values=loads,
+                replicass=instances_list,
                 partitions=partitions,
                 cpu_limit=cpu_limit,
                 memory_limit=memory_limit,
-                kafka_streams_commit_interval_ms=kafka_streams_commit_interval_ms,
-                execution_minutes=execution_minutes,
+                kafka_streams_commit_interval_ms=commit_ms,
+                execution_minutes=duration,
                 domain_restriction_strategy=no_lower_bound_strategy,
                 search_strategy=linear_search_strategy,
                 subexperiment_executor=subexperiment_executor,
@@ -118,30 +166,30 @@ def main():
             experiment_config = ExperimentConfig(
                 use_case=uc,
                 exp_id=exp_id,
-                dim_values=dim_values,
-                replicass=replicas,
+                dim_values=loads,
+                replicass=instances_list,
                 partitions=partitions,
                 cpu_limit=cpu_limit,
                 memory_limit=memory_limit,
-                kafka_streams_commit_interval_ms=kafka_streams_commit_interval_ms,
-                execution_minutes=execution_minutes,
+                kafka_streams_commit_interval_ms=commit_ms,
+                execution_minutes=duration,
                 domain_restriction_strategy=no_lower_bound_strategy,
                 search_strategy=binary_search_strategy,
                 subexperiment_executor=subexperiment_executor,
                 subexperiment_evaluator=subexperiment_evaluator)
         # no domain restriction + check_all
         else:
-            print(f"Going to execute {len(dim_values)*len(replicas)} subexperiments in total..")
+            print(f"Going to execute {len(loads)*len(instances_list)} subexperiments in total..")
             experiment_config = ExperimentConfig(
                 use_case=uc,
                 exp_id=exp_id,
-                dim_values=dim_values,
-                replicass=replicas,
+                dim_values=loads,
+                replicass=instances_list,
                 partitions=partitions,
                 cpu_limit=cpu_limit,
                 memory_limit=memory_limit,
-                kafka_streams_commit_interval_ms=kafka_streams_commit_interval_ms,
-                execution_minutes=execution_minutes,
+                kafka_streams_commit_interval_ms=commit_ms,
+                execution_minutes=duration,
                 domain_restriction_strategy=no_lower_bound_strategy,
                 search_strategy=check_all_strategy,
                 subexperiment_executor=subexperiment_executor,
@@ -153,4 +201,8 @@ def main():
 
 if __name__ == '__main__':
     logging.basicConfig(level=logging.INFO)
-    main()
+    args = load_variables()
+    main(args.uc, args.loads, args.instances_list, args.partitions, args.cpu_limit,
+         args.memory_limit, args.commit_ms, args.duration,
+         args.domain_restriction, args.search_strategy, args.reset,
+         args.reset_only)
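
For reference, a sketch of how the reworked CLI might be invoked (the load and instance values below are illustrative, not taken from this change):

    ./theodolite.py 1 10000 50000 100000 --instances 1 2 4 --partitions 40 \
        --duration 5 --domain-restriction --search-strategy linear-search

The use case and the loads are now positional arguments; all remaining parameters are optional flags that fall back to the defaults defined in load_variables(), replacing the former fixed-position sys.argv interface.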