diff --git a/execution/lag_analysis.py b/execution/lag_analysis.py
index 2ae3c593bdebfbcd41393154d07faec171d9243a..3950f12413745a6f3802a2e223123830f7d03649 100644
--- a/execution/lag_analysis.py
+++ b/execution/lag_analysis.py
@@ -5,151 +5,162 @@ from datetime import datetime, timedelta, timezone
 import pandas as pd
 import matplotlib.pyplot as plt
 import csv
-#
-exp_id =  sys.argv[1]
-benchmark = sys.argv[2]
-dim_value = sys.argv[3]
-instances = sys.argv[4]
-execution_minutes = int(sys.argv[5])
-time_diff_ms = int(os.getenv('CLOCK_DIFF_MS', 0))
+import logging
 
-prometheus_query_path = 'http://kube1.se.internal:32529/api/v1/query_range'
 
-#http://localhost:9090/api/v1/query_range?query=sum%20by(job,topic)(kafka_consumer_consumer_fetch_manager_metrics_records_lag)&start=2015-07-01T20:10:30.781Z&end=2020-07-01T20:11:00.781Z&step=15s
+def main(exp_id, benchmark, dim_value, instances, execution_minutes,
+         prometheus_base_url='http://kube1.se.internal:32529'):
+    print("Main")
+    time_diff_ms = int(os.getenv('CLOCK_DIFF_MS', 0))
 
-now_local = datetime.utcnow().replace(tzinfo=timezone.utc).replace(microsecond=0)
-now = now_local - timedelta(milliseconds=time_diff_ms)
-print(f"Now Local: {now_local}")
-print(f"Now Used: {now}")
+    now_local = datetime.utcnow().replace(tzinfo=timezone.utc).replace(microsecond=0)
+    now = now_local - timedelta(milliseconds=time_diff_ms)
+    print(f"Now Local: {now_local}")
+    print(f"Now Used: {now}")
 
-end = now
-start = now - timedelta(minutes=execution_minutes)
+    end = now
+    start = now - timedelta(minutes=execution_minutes)
 
-#print(start.isoformat().replace('+00:00', 'Z'))
-#print(end.isoformat().replace('+00:00', 'Z'))
+    #print(start.isoformat().replace('+00:00', 'Z'))
+    #print(end.isoformat().replace('+00:00', 'Z'))
 
-response = requests.get(prometheus_query_path, params={
-    #'query': "sum by(job,topic)(kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
-    'query': "sum by(group, topic)(kafka_consumergroup_group_lag > 0)",
-    'start': start.isoformat(),
-    'end': end.isoformat(),
-    'step': '5s'})
+    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
+        # 'query': "sum by(job,topic)(kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
+        'query': "sum by(group, topic)(kafka_consumergroup_group_lag > 0)",
+        'start': start.isoformat(),
+        'end': end.isoformat(),
+        'step': '5s'})
+    # response
+    # print(response.request.path_url)
+    # response.content
+    results = response.json()['data']['result']
 
-#response
-#print(response.request.path_url)
-#response.content
-results = response.json()['data']['result']
+    d = []
 
-d = []
+    for result in results:
+        # print(result['metric']['topic'])
+        topic = result['metric']['topic']
+        for value in result['values']:
+            # print(value)
+            d.append({'topic': topic, 'timestamp': int(
+                value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
 
-for result in results:
-    #print(result['metric']['topic'])
-    topic = result['metric']['topic']
-    for value in result['values']:
-        #print(value)
-        d.append({'topic': topic, 'timestamp': int(value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
+    df = pd.DataFrame(d)
 
-df = pd.DataFrame(d)
+    # Do some analysis
 
-# Do some analysis
+    input = df.loc[df['topic'] == "input"]
 
-input = df.loc[df['topic'] == "input"]
+    # input.plot(kind='line',x='timestamp',y='value',color='red')
+    # plt.show()
 
-#input.plot(kind='line',x='timestamp',y='value',color='red')
-#plt.show()
+    from sklearn.linear_model import LinearRegression
 
-from sklearn.linear_model import LinearRegression
+    # values converts it into a numpy array
+    X = input.iloc[:, 1].values.reshape(-1, 1)
+    # -1 means that calculate the dimension of rows, but have 1 column
+    Y = input.iloc[:, 2].values.reshape(-1, 1)
+    linear_regressor = LinearRegression()  # create object for the class
+    linear_regressor.fit(X, Y)  # perform linear regression
+    Y_pred = linear_regressor.predict(X)  # make predictions
 
-X = input.iloc[:, 1].values.reshape(-1, 1)  # values converts it into a numpy array
-Y = input.iloc[:, 2].values.reshape(-1, 1)  # -1 means that calculate the dimension of rows, but have 1 column
-linear_regressor = LinearRegression()  # create object for the class
-linear_regressor.fit(X, Y)  # perform linear regression
-Y_pred = linear_regressor.predict(X)  # make predictions
+    print(linear_regressor.coef_)
 
-print(linear_regressor.coef_)
+    # print(Y_pred)
 
-#print(Y_pred)
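+    # The regression slope (change of lag in records per second) is the trend
+    # metric appended to results.csv below.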
+    fields = [exp_id, datetime.now(), benchmark, dim_value,
+              instances, linear_regressor.coef_]
+    print(fields)
+    with open(r'results.csv', 'a') as f:
+        writer = csv.writer(f)
+        writer.writerow(fields)
 
-fields=[exp_id, datetime.now(), benchmark, dim_value, instances, linear_regressor.coef_]
-print(fields)
-with open(r'results.csv', 'a') as f:
-    writer = csv.writer(f)
-    writer.writerow(fields)
+    filename = f"exp{exp_id}_{benchmark}_{dim_value}_{instances}"
 
-filename = f"exp{exp_id}_{benchmark}_{dim_value}_{instances}"
+    plt.plot(X, Y)
+    plt.plot(X, Y_pred, color='red')
 
-plt.plot(X, Y)
-plt.plot(X, Y_pred, color='red')
+    plt.savefig(f"{filename}_plot.png")
 
-plt.savefig(f"{filename}_plot.png")
+    df.to_csv(f"{filename}_values.csv")
 
-df.to_csv(f"{filename}_values.csv")
+    # Load total lag count
 
+    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
+        'query': "sum by(group)(kafka_consumergroup_group_lag > 0)",
+        'start': start.isoformat(),
+        'end': end.isoformat(),
+        'step': '5s'})
 
-# Load total lag count
+    results = response.json()['data']['result']
 
-response = requests.get(prometheus_query_path, params={
-    'query': "sum by(group)(kafka_consumergroup_group_lag > 0)",
-    'start': start.isoformat(),
-    'end': end.isoformat(),
-    'step': '5s'})
+    d = []
 
-results = response.json()['data']['result']
+    for result in results:
+        # print(result['metric']['topic'])
+        group = result['metric']['group']
+        for value in result['values']:
+            # print(value)
+            d.append({'group': group, 'timestamp': int(
+                value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
 
-d = []
+    df = pd.DataFrame(d)
 
-for result in results:
-    #print(result['metric']['topic'])
-    group = result['metric']['group']
-    for value in result['values']:
-        #print(value)
-        d.append({'group': group, 'timestamp': int(value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
+    df.to_csv(f"{filename}_totallag.csv")
 
-df = pd.DataFrame(d)
+    # Load partition count
 
-df.to_csv(f"{filename}_totallag.csv")
+    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
+        'query': "count by(group,topic)(kafka_consumergroup_group_offset > 0)",
+        'start': start.isoformat(),
+        'end': end.isoformat(),
+        'step': '5s'})
 
-# Load partition count
+    results = response.json()['data']['result']
 
-response = requests.get(prometheus_query_path, params={
-    'query': "count by(group,topic)(kafka_consumergroup_group_offset > 0)",
-    'start': start.isoformat(),
-    'end': end.isoformat(),
-    'step': '5s'})
+    d = []
 
-results = response.json()['data']['result']
+    for result in results:
+        # print(result['metric']['topic'])
+        topic = result['metric']['topic']
+        for value in result['values']:
+            # print(value)
+            d.append({'topic': topic, 'timestamp': int(
+                value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
 
-d = []
+    df = pd.DataFrame(d)
 
-for result in results:
-    #print(result['metric']['topic'])
-    topic = result['metric']['topic']
-    for value in result['values']:
-        #print(value)
-        d.append({'topic': topic, 'timestamp': int(value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
+    df.to_csv(f"{filename}_partitions.csv")
 
-df = pd.DataFrame(d)
+    # Load instances count
 
-df.to_csv(f"{filename}_partitions.csv")
+    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
+        'query': "count(count (kafka_consumer_consumer_fetch_manager_metrics_records_lag) by(pod))",
+        'start': start.isoformat(),
+        'end': end.isoformat(),
+        'step': '5s'})
 
+    results = response.json()['data']['result']
 
-# Load instances count
+    d = []
 
-response = requests.get(prometheus_query_path, params={
-    'query': "count(count (kafka_consumer_consumer_fetch_manager_metrics_records_lag) by(pod))",
-    'start': start.isoformat(),
-    'end': end.isoformat(),
-    'step': '5s'})
+    for result in results:
+        for value in result['values']:
+            # print(value)
+            d.append({'timestamp': int(value[0]), 'value': int(value[1])})
 
-results = response.json()['data']['result']
+    df = pd.DataFrame(d)
 
-d = []
+    df.to_csv(f"{filename}_instances.csv")
 
-for result in results:
-    for value in result['values']:
-        #print(value)
-        d.append({'timestamp': int(value[0]), 'value': int(value[1])})
 
-df = pd.DataFrame(d)
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.INFO)
 
-df.to_csv(f"{filename}_instances.csv")
\ No newline at end of file
+    # Load arguments
+    exp_id = sys.argv[1]
+    benchmark = sys.argv[2]
+    dim_value = sys.argv[3]
+    instances = sys.argv[4]
+    execution_minutes = int(sys.argv[5])
+
+    main(exp_id, benchmark, dim_value, instances, execution_minutes)
diff --git a/execution/requirements.txt b/execution/requirements.txt
index 7224efe80aa1686bb3de90b2beac5df47a56ed8f..18a06882007eebf69bf3bf4f84b869454b36a0a6 100644
--- a/execution/requirements.txt
+++ b/execution/requirements.txt
@@ -1,4 +1,8 @@
 matplotlib==3.2.0
 pandas==1.0.1
 requests==2.23.0
-scikit-learn==0.22.2.post1
\ No newline at end of file
+scikit-learn==0.22.2.post1
+
+# For run_uc.py
+kubernetes==11.0.0
+confuse==1.1.0
diff --git a/execution/run_uc.py b/execution/run_uc.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c490afcd8307824874cc8eb604d7cdb992dc447
--- /dev/null
+++ b/execution/run_uc.py
@@ -0,0 +1,552 @@
+import argparse  # parse arguments from cli
+import atexit  # used to clean up resources at program exit (e.g. ctrl-c)
+from kubernetes import client, config  # kubernetes api
+from kubernetes.stream import stream
+import lag_analysis
+import logging  # logging
+from os import path  # path utilities
+import subprocess  # execute bash commands
+import sys # for exit of program
+import time  # process sleep
+import yaml  # convert from file to yaml object
+
+coreApi = None  # access kubernetes core api
+appsApi = None  # access kubernetes apps api
+customApi = None  # access kubernetes custom object api
+args = None  # CLI arguments
+
+
+def load_variables():
+    """Load the CLI variables given at the command line"""
+    global args
+    print('Load CLI variables')
+    parser = argparse.ArgumentParser(description='Run use case program')
+    parser.add_argument('--exp-id', '-id',
+                        dest='exp_id',
+                        default='1',
+                        metavar='EXP_ID',
+                        help='ID of the experiment')
+    parser.add_argument('--use-case', '-uc',
+                        dest='uc_id',
+                        default='1',
+                        metavar='UC_NUMBER',
+                        help='use case number, one of 1, 2, 3 or 4')
+    parser.add_argument('--dim-value', '-d',
+                        dest='dim_value',
+                        default=10000,
+                        type=int,
+                        metavar='DIM_VALUE',
+                        help='Value for the workload generator to be tested')
+    parser.add_argument('--instances', '-i',
+                        dest='instances',
+                        default=1,
+                        type=int,
+                        metavar='INSTANCES',
+                        help='Numbers of instances to be benchmarked')
+    parser.add_argument('--partitions', '-p',
+                        dest='partitions',
+                        default=40,
+                        type=int,
+                        metavar='PARTITIONS',
+                        help='Number of partitions for Kafka topics')
+    parser.add_argument('--cpu-limit', '-cpu',
+                        dest='cpu_limit',
+                        default='1000m',
+                        metavar='CPU_LIMIT',
+                        help='Kubernetes CPU limit')
+    parser.add_argument('--memory-limit', '-mem',
+                        dest='memory_limit',
+                        default='4Gi',
+                        metavar='MEMORY_LIMIT',
+                        help='Kubernetes memory limit')
+    parser.add_argument('--commit-interval', '-ci',
+                        dest='commit_interval_ms',
+                        default=100,
+                        type=int,
+                        metavar='KAFKA_STREAMS_COMMIT_INTERVAL_MS',
+                        help='Kafka Streams commit interval in milliseconds')
+    parser.add_argument('--executions-minutes', '-exm',
+                        dest='execution_minutes',
+                        default=5,
+                        type=int,
+                        metavar='EXECUTION_MINUTES',
+                        help='Duration in minutes for which subexperiments \
+                                should be executed')
+    parser.add_argument('--reset', '-res',
+                        dest='reset',
+                        action="store_true",
+                        help='Resets the environment before execution')
+    parser.add_argument('--reset-only', '-reso',
+                        dest='reset_only',
+                        action="store_true",
+                        help='Only resets the environment. Ignores all other parameters')
+
+    args = parser.parse_args()
+    print(args)
+
+
+def initialize_kubernetes_api():
+    """Load the kubernetes config from local or the cluster and creates
+    needed APIs.
+    """
+    global coreApi, appsApi, customApi
+    print('Connect to kubernetes api')
+    try:
+        config.load_kube_config()  # try using local config
+    except config.config_exception.ConfigException as e:
+        # load config from pod, if local config is not available
+        logging.debug('Failed loading local Kubernetes configuration,'
+                      + ' try from cluster')
+        logging.debug(e)
+        config.load_incluster_config()
+
+    coreApi = client.CoreV1Api()
+    appsApi = client.AppsV1Api()
+    customApi = client.CustomObjectsApi()
+
+
+def create_topics(topics):
+    """Create the topics needed for the use cases
+    :param topics: List of topics that should be created.
+    """
+    # Calling exec and waiting for response
+    print('Create topics')
+    for (topic, partitions) in topics:
+        print('Create topic ' + topic + ' with #' + str(partitions)
+              + ' partitions')
+        exec_command = [
+            '/bin/sh',
+            '-c',
+            f'kafka-topics --zookeeper my-confluent-cp-zookeeper:2181\
+            --create --topic {topic} --partitions {partitions}\
+            --replication-factor 1'
+        ]
+        resp = stream(coreApi.connect_get_namespaced_pod_exec,
+                      "kafka-client",
+                      'default',
+                      command=exec_command,
+                      stderr=True, stdin=False,
+                      stdout=True, tty=False)
+        print(resp)
+
+
+def load_yaml(file_path):
+    """Creates a yaml file from the file at given path.
+    :param file_path: The path to the file which contains the yaml.
+    :return: The file as a yaml object.
+    """
+    try:
+        f = open(path.join(path.dirname(__file__), file_path))
+        with f:
+            return yaml.safe_load(f)
+    except Exception as e:
+        logging.error('Error opening file %s' % file_path)
+        logging.error(e)
+
+
+def load_yaml_files():
+    """Load the needed yaml files and creates objects from them.
+    :return: wg, app_svc, app_svc_monitor ,app_jmx, app_deploy
+    """
+    print('Load kubernetes yaml files')
+    wg = load_yaml('uc-workload-generator/base/workloadGenerator.yaml')
+    app_svc = load_yaml('uc-application/base/aggregation-service.yaml')
+    app_svc_monitor = load_yaml('uc-application/base/service-monitor.yaml')
+    app_jmx = load_yaml('uc-application/base/jmx-configmap.yaml')
+    app_deploy = load_yaml('uc-application/base/aggregation-deployment.yaml')
+
+    print('Kubernetes yaml files loaded')
+    return wg, app_svc, app_svc_monitor, app_jmx, app_deploy
+
+
+def start_workload_generator(wg_yaml):
+    """Starts the workload generator.
+    :param wg_yaml: The yaml object for the workload generator.
+    :return:
+        The Deployment created by the API, or the yaml object in case it
+        already exists or an error occurs.
+    """
+    print('Start workload generator')
+
+    num_sensors = args.dim_value
+    wl_max_records = 150000
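+    # Ceiling division: one workload generator instance per 150,000 sensors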
+    wl_instances = int(((num_sensors + (wl_max_records - 1)) / wl_max_records))
+
+    # Set special parameters for uc 2
+    if args.uc_id == '2':
+        print('Use uc2 specific configuration')
+        num_nested_groups = args.dim_value
+        num_sensors = '4'
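+        # Full hierarchy with 4 sensors per group: the total sensor count
+        # grows approximately as 4^num_nested_groups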
+        approx_num_sensors = int(num_sensors) ** num_nested_groups
+        wl_instances = int(
+            ((approx_num_sensors + wl_max_records - 1) / wl_max_records)
+        )
+
+    # Customize workload generator creations
+    wg_yaml['spec']['replicas'] = wl_instances
+    # TODO: access the container by name
+    # Set used use case
+    wg_containter = wg_yaml['spec']['template']['spec']['containers'][0]
+    wg_containter['image'] = 'theodolite/theodolite-uc' + args.uc_id + \
+        '-workload-generator:latest'
+    # TODO: access the attribute by name
+    # Set environment variables
+    wg_containter['env'][0]['value'] = str(num_sensors)
+    wg_containter['env'][1]['value'] = str(wl_instances)
+    if args.uc_id == '2':  # Special configuration for uc2
+        wg_containter['env'][2]['value'] = str(num_nested_groups)
+
+    try:
+        wg_ss = appsApi.create_namespaced_deployment(
+            namespace="default",
+            body=wg_yaml
+        )
+        print("Deployment '%s' created." % wg_ss.metadata.name)
+        return wg_ss
+    except client.rest.ApiException as e:
+        print("Deployment creation error: %s" % e.reason)
+        return wg_yaml
+
+
+def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml):
+    """Applies the service, service monitor, jmx config map and start the
+    use case application.
+
+    :param svc_yaml: The yaml object for the service.
+    :param svc_monitor_yaml: The yaml object for the service monitor.
+    :param jmx_yaml: The yaml object for the jmx config map.
+    :param deploy_yaml: The yaml object for the application.
+    :return:
+        The Service, ServiceMonitor, JMX ConfigMap and Deployment.
+        In case a resource already exists or an error occurs, the
+        corresponding yaml object is returned.
+        return svc, svc_monitor, jmx_cm, app_deploy
+    """
+    print('Start use case application')
+    svc, svc_monitor, jmx_cm, app_deploy = None, None, None, None
+
+    # Create Service
+    try:
+        svc = coreApi.create_namespaced_service(
+            namespace="default", body=svc_yaml)
+        print("Service '%s' created." % svc.metadata.name)
+    except client.rest.ApiException as e:
+        svc = svc_yaml
+        logging.error("Service creation error: %s" % e.reason)
+
+    # Create custom object service monitor
+    try:
+        svc_monitor = customApi.create_namespaced_custom_object(
+            group="monitoring.coreos.com",
+            version="v1",
+            namespace="default",
+            plural="servicemonitors",  # CustomResourceDef of ServiceMonitor
+            body=svc_monitor_yaml,
+        )
+        print("ServiceMonitor '%s' created." % svc_monitor['metadata']['name'])
+    except client.rest.ApiException as e:
+        svc_monitor = svc_monitor_yaml
+        logging.error("ServiceMonitor creation error: %s" % e.reason)
+
+    # Apply jmx config map for aggregation service
+    try:
+        jmx_cm = coreApi.create_namespaced_config_map(
+            namespace="default", body=jmx_yaml)
+        print("ConfigMap '%s' created." % jmx_cm.metadata.name)
+    except client.rest.ApiException as e:
+        jmx_cm = jmx_yaml
+        logging.error("ConfigMap creation error: %s" % e.reason)
+
+    # Create deployment
+    deploy_yaml['spec']['replicas'] = args.instances
+    # TODO: access the container by name
+    app_container = deploy_yaml['spec']['template']['spec']['containers'][0]
+    app_container['image'] = 'theodolite/theodolite-uc' + args.uc_id \
+        + '-kstreams-app:latest'
+    # TODO: access the attribute by name
+    app_container['env'][0]['value'] = str(args.commit_interval_ms)
+    app_container['resources']['limits']['memory'] = args.memory_limit
+    app_container['resources']['limits']['cpu'] = args.cpu_limit
+    try:
+        app_deploy = appsApi.create_namespaced_deployment(
+            namespace="default",
+            body=deploy_yaml
+        )
+        print("Deployment '%s' created." % app_deploy.metadata.name)
+    except client.rest.ApiException as e:
+        app_deploy = deploy_yaml
+        logging.error("Deployment creation error: %s" % e.reason)
+
+    return svc, svc_monitor, jmx_cm, app_deploy
+
+
+def wait_execution():
+    """Wait time while in execution."""
+    print('Wait while executing')
+    # TODO: ask which fits better
+    # time.sleep(args.execution_minutes * 60)
+    for i in range(args.execution_minutes):
+        time.sleep(60)
+        print(f"Executed: {i+1} minutes")
+    print('Execution finished')
+    return
+
+
+def run_evaluation():
+    """Runs the evaluation function"""
+    print('Run evaluation function')
+    lag_analysis.main(args.exp_id, f'uc{args.uc_id}', args.dim_value,
+                      args.instances, args.execution_minutes)
+    return
+
+
+def delete_resource(obj, del_func):
+    """
+    Helper function to delete kubernetes resources.
+    First tries to delete with the kubernetes object.
+    Then it uses the dict representation of yaml to delete the object.
+    :param obj: Either kubernetes resource object or yaml as a dict.
+    :param del_func: The function that needs to be executed for deletion
+    """
+    try:
+        del_func(obj.metadata.name, 'default')
+    except Exception as e:
+        logging.debug('Error deleting resource with api object, trying with dict.')
+        try:
+            del_func(obj['metadata']['name'], 'default')
+        except Exception as e:
+            logging.error("Error deleting resource")
+            logging.error(e)
+            return
+    print('Resource deleted')
+
+
+def stop_applications(wg, app_svc, app_svc_monitor, app_jmx, app_deploy):
+    """Stops the applied applications and delete resources.
+    :param wg: The workload generator statefull set.
+    :param app_svc: The application service.
+    :param app_svc_monitor: The application service monitor.
+    :param app_jmx: The application jmx config map.
+    :param app_deploy: The application deployment.
+    """
+    print('Stop use case application and workload generator')
+
+    print('Delete workload generator')
+    delete_resource(wg, appsApi.delete_namespaced_deployment)
+
+    print('Delete app service')
+    delete_resource(app_svc, coreApi.delete_namespaced_service)
+
+    print('Delete service monitor')
+    try:
+        customApi.delete_namespaced_custom_object(
+            group="monitoring.coreos.com",
+            version="v1",
+            namespace="default",
+            plural="servicemonitors",
+            name=app_svc_monitor['metadata']['name'])
+        print('Resource deleted')
+    except Exception as e:
+        print("Error deleting service monitor")
+
+    print('Delete jmx config map')
+    delete_resource(app_jmx, coreApi.delete_namespaced_config_map)
+
+    print('Delete uc application')
+    delete_resource(app_deploy, appsApi.delete_namespaced_deployment)
+    return
+
+
+def delete_topics(topics):
+    """Delete topics from Kafka.
+    :param topics: List of topics to delete.
+    """
+    print('Delete topics from Kafka')
+
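+    # Regex matching the internal 'theodolite-' topics plus all use case topics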
+    topics_delete = 'theodolite-.*|' + '|'.join([ti[0] for ti in topics])
+
+    num_topics_command = [
+        '/bin/sh',
+        '-c',
+        f'kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list \
+        | sed -n -E "/^({topics_delete})\
+        ( - marked for deletion)?$/p" | wc -l'
+    ]
+
+    topics_deletion_command = [
+        '/bin/sh',
+        '-c',
+        f'kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete \
+        --topic "{topics_delete}"'
+    ]
+
+    # Wait that topics get deleted
+    while True:
+        # topic deletion, sometimes a second deletion seems to be required
+        resp = stream(coreApi.connect_get_namespaced_pod_exec,
+                      "kafka-client",
+                      'default',
+                      command=topics_deletion_command,
+                      stderr=True, stdin=False,
+                      stdout=True, tty=False)
+        print(resp)
+
+        print('Wait for topic deletion')
+        time.sleep(2)
+        resp = stream(coreApi.connect_get_namespaced_pod_exec,
+                      "kafka-client",
+                      'default',
+                      command=num_topics_command,
+                      stderr=True, stdin=False,
+                      stdout=True, tty=False)
+        if resp == '0':
+            print("Topics deleted")
+            break
+    return
+
+
+def reset_zookeeper():
+    """Delete ZooKeeper configurations used for workload generation.
+    """
+    print('Delete ZooKeeper configurations used for workload generation')
+
+    delete_zoo_data_command = [
+        'kubectl',
+        'exec',
+        'zookeeper-client',
+        '--',
+        'bash',
+        '-c',
+        'zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall '
+        + '/workload-generation'
+    ]
+
+    check_zoo_data_command = [
+        'kubectl',
+        'exec',
+        'zookeeper-client',
+        '--',
+        'bash',
+        '-c',
+        'zookeeper-shell my-confluent-cp-zookeeper:2181 get '
+        + '/workload-generation'
+    ]
+
+    # Wait for configuration deletion
+    while True:
+        # Delete Zookeeper configuration data
+        output = subprocess.run(delete_zoo_data_command,
+                                capture_output=True,
+                                text=True)
+        logging.debug(output.stdout)
+
+        # Check data is deleted
+        output = subprocess.run(check_zoo_data_command,
+                                capture_output=True,
+                                text=True)
+        logging.debug(output)
+
+        if output.returncode == 1: # Means data not available anymore
+            print('ZooKeeper reset was successful.')
+            break
+        else:
+            print('ZooKeeper reset was not successful. Retrying in 5s.')
+            time.sleep(5)
+    return
+
+
+def stop_lag_exporter():
+    """
+    Stop the lag exporter in order to reset it and allow smooth execution of
+    the next use cases.
+    """
+    print('Stop the lag exporter')
+
+    find_pod_command = [
+        'kubectl',
+        'get',
+        'pod',
+        '-l',
+        'app.kubernetes.io/name=kafka-lag-exporter',
+        '-o',
+        'jsonpath="{.items[0].metadata.name}"'
+    ]
+    output = subprocess.run(find_pod_command, capture_output=True, text=True)
+    lag_exporter_pod = output.stdout.replace('"', '')
+    delete_pod_command = [
+        'kubectl',
+        'delete',
+        'pod',
+        lag_exporter_pod
+    ]
+    output = subprocess.run(delete_pod_command, capture_output=True, text=True)
+    print(output)
+    return
+
+
+def reset_cluster(wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics):
+    """
+    Stop the applications, delete topics, reset ZooKeeper and stop the lag exporter.
+    """
+    print('Reset cluster')
+    stop_applications(wg, app_svc, app_svc_monitor, app_jmx, app_deploy)
+    print('---------------------')
+    delete_topics(topics)
+    print('---------------------')
+    reset_zookeeper()
+    print('---------------------')
+    stop_lag_exporter()
+
+def main():
+    load_variables()
+    print('---------------------')
+
+    wg, app_svc, app_svc_monitor, app_jmx, app_deploy = load_yaml_files()
+    print('---------------------')
+
+    initialize_kubernetes_api()
+    print('---------------------')
+
+    topics = [('input', args.partitions),
+              ('output', args.partitions),
+              ('aggregation-feedback', args.partitions),
+              ('configuration', 1)]
+
+    # Check for reset options
+    if args.reset_only:
+        # Only reset the cluster and then end the program
+        reset_cluster(wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics)
+        sys.exit()
+    if args.reset:
+        # Reset cluster before execution
+        print('Reset mode')
+        reset_cluster(wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics)
+        print('---------------------')
+
+    # Register the reset operation so that it is executed at the end of the program
+    atexit.register(reset_cluster, wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics)
+
+    create_topics(topics)
+    print('---------------------')
+
+    wg = start_workload_generator(wg)
+    print('---------------------')
+
+    app_svc, app_svc_monitor, app_jmx, app_deploy = start_application(
+        app_svc,
+        app_svc_monitor,
+        app_jmx,
+        app_deploy)
+    print('---------------------')
+
+    wait_execution()
+    print('---------------------')
+
+    run_evaluation()
+    print('---------------------')
+
+    # Cluster is reset via the registered atexit handler
+    # reset_cluster(wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics)
+
+
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.INFO)
+    main()
diff --git a/execution/run_uc1.sh b/execution/run_uc1.sh
index e6a3eb05ed7cca167ccbc9ae8c3d5cbc9803e000..02c46d8832fc800c57453570b14a6bf02681326a 100755
--- a/execution/run_uc1.sh
+++ b/execution/run_uc1.sh
@@ -29,38 +29,59 @@ NUM_SENSORS=$DIM_VALUE
 WL_MAX_RECORDS=150000
 WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
 
-WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml)
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
+cat <<EOF >uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: $WL_INSTANCES
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "$NUM_SENSORS"
+        - name: INSTANCES
+          value: "$WL_INSTANCES"
+EOF
+kubectl apply -k uc-workload-generator/overlay/uc1-workload-generator
 
 # Start application
 REPLICAS=$INSTANCES
-# When not using `sed` anymore, use `kubectl apply -f uc1-application`
-kubectl apply -f uc1-application/aggregation-service.yaml
-kubectl apply -f uc1-application/jmx-configmap.yaml
-kubectl apply -f uc1-application/service-monitor.yaml
-#kubectl apply -f uc1-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc1-application/aggregation-deployment.yaml)
-echo "$APPLICATION_YAML" | kubectl apply -f -
-kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+cat <<EOF >uc-application/overlay/uc1-application/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: $REPLICAS
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+        resources:
+          limits:
+            memory: $MEMORY_LIMIT
+            cpu: $CPU_LIMIT
+EOF
+kubectl apply -k uc-application/overlay/uc1-application
 
 # Execute for certain time
-sleep ${EXECUTION_MINUTES}m
+sleep $(($EXECUTION_MINUTES * 60))
 
 # Run eval script
 source ../.venv/bin/activate
 python lag_analysis.py $EXP_ID uc1 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
 deactivate
 
-# Stop wl and app
-#kubectl delete -f uc1-workload-generator/deployment.yaml
-#sed "s/{{INSTANCES}}/1/g" uc1-workload-generator/deployment.yaml | kubectl delete -f -
-#sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml | kubectl delete -f -
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
-kubectl delete -f uc1-application/aggregation-service.yaml
-kubectl delete -f uc1-application/jmx-configmap.yaml
-kubectl delete -f uc1-application/service-monitor.yaml
-#kubectl delete -f uc1-application/aggregation-deployment.yaml
-echo "$APPLICATION_YAML" | kubectl delete -f -
+# Stop workload generator and app
+kubectl delete -k uc-workload-generator/overlay/uc1-workload-generator
+kubectl delete -k uc-application/overlay/uc1-application
 
 
 # Delete topics instead of Kafka
diff --git a/execution/run_uc2.sh b/execution/run_uc2.sh
index 76d76cd4dc45b3b5e26ea4033c7afd58268fd3fb..4544d3609ed807141455378b92ce3536ea2f92f6 100755
--- a/execution/run_uc2.sh
+++ b/execution/run_uc2.sh
@@ -30,36 +30,63 @@ WL_MAX_RECORDS=150000
 APPROX_NUM_SENSORS=$((4**NUM_NESTED_GROUPS))
 WL_INSTANCES=$(((APPROX_NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
 
-WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_NESTED_GROUPS}}/$NUM_NESTED_GROUPS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc2-workload-generator/deployment.yaml)
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
+cat <<EOF >uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: $WL_INSTANCES
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "4"
+        - name: HIERARCHY
+          value: "full"
+        - name: NUM_NESTED_GROUPS
+          value: "$NUM_NESTED_GROUPS"
+        - name: INSTANCES
+          value: "$WL_INSTANCES"
+EOF
+kubectl apply -k uc-workload-generator/overlay/uc2-workload-generator
 
 # Start application
 REPLICAS=$INSTANCES
-# When not using `sed` anymore, use `kubectl apply -f uc2-application`
-kubectl apply -f uc2-application/aggregation-service.yaml
-kubectl apply -f uc2-application/jmx-configmap.yaml
-kubectl apply -f uc2-application/service-monitor.yaml
-#kubectl apply -f uc2-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc2-application/aggregation-deployment.yaml)
-echo "$APPLICATION_YAML" | kubectl apply -f -
-kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+cat <<EOF >uc-application/overlay/uc2-application/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: $REPLICAS
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+        resources:
+          limits:
+            memory: $MEMORY_LIMIT
+            cpu: $CPU_LIMIT
+EOF
+kubectl apply -k uc-application/overlay/uc2-application
 
 # Execute for certain time
-sleep ${EXECUTION_MINUTES}m
+sleep $(($EXECUTION_MINUTES * 60))
 
 # Run eval script
 source ../.venv/bin/activate
 python lag_analysis.py $EXP_ID uc2 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
 deactivate
 
-# Stop wl and app
-#sed "s/{{INSTANCES}}/1/g" uc2-workload-generator/deployment.yaml | kubectl delete -f -
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
-kubectl delete -f uc2-application/aggregation-service.yaml
-kubectl delete -f uc2-application/jmx-configmap.yaml
-kubectl delete -f uc2-application/service-monitor.yaml
-#kubectl delete -f uc2-application/aggregation-deployment.yaml
-echo "$APPLICATION_YAML" | kubectl delete -f -
+# Stop workload generator and app
+kubectl delete -k uc-workload-generator/overlay/uc2-workload-generator
+kubectl delete -k uc-application/overlay/uc2-application
 
 
 # Delete topics instead of Kafka
diff --git a/execution/run_uc3.sh b/execution/run_uc3.sh
index 1e34aea99fdc7a927e1943a397f02e1bb56f6a74..4f2323f937f19d01a73482dea6aeaf5e922a0a3f 100755
--- a/execution/run_uc3.sh
+++ b/execution/run_uc3.sh
@@ -29,40 +29,61 @@ NUM_SENSORS=$DIM_VALUE
 WL_MAX_RECORDS=150000
 WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
 
-WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc3-workload-generator/deployment.yaml)
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
+cat <<EOF >uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: $WL_INSTANCES
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "$NUM_SENSORS"
+        - name: INSTANCES
+          value: "$WL_INSTANCES"
+EOF
+kubectl apply -k uc-workload-generator/overlay/uc3-workload-generator
+
 
 # Start application
 REPLICAS=$INSTANCES
-# When not using `sed` anymore, use `kubectl apply -f uc3-application`
-kubectl apply -f uc3-application/aggregation-service.yaml
-kubectl apply -f uc3-application/jmx-configmap.yaml
-kubectl apply -f uc3-application/service-monitor.yaml
-#kubectl apply -f uc3-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc3-application/aggregation-deployment.yaml)
-echo "$APPLICATION_YAML" | kubectl apply -f -
-kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+cat <<EOF >uc-application/overlay/uc3-application/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: $REPLICAS
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+        resources:
+          limits:
+            memory: $MEMORY_LIMIT
+            cpu: $CPU_LIMIT
+EOF
+kubectl apply -k uc-application/overlay/uc3-application
+kubectl scale deployment uc3-titan-ccp-aggregation --replicas=$REPLICAS
 
 # Execute for certain time
-sleep ${EXECUTION_MINUTES}m
+sleep $(($EXECUTION_MINUTES * 60))
 
 # Run eval script
 source ../.venv/bin/activate
 python lag_analysis.py $EXP_ID uc3 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
 deactivate
 
-# Stop wl and app
-#kubectl delete -f uc3-workload-generator/deployment.yaml
-#sed "s/{{INSTANCES}}/1/g" uc3-workload-generator/deployment.yaml | kubectl delete -f -
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
-kubectl delete -f uc3-application/aggregation-service.yaml
-kubectl delete -f uc3-application/jmx-configmap.yaml
-kubectl delete -f uc3-application/service-monitor.yaml
-#kubectl delete -f uc3-application/aggregation-deployment.yaml
-#sed "s/{{CPU_LIMIT}}/1000m/g; s/{{MEMORY_LIMIT}}/4Gi/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/100/g" uc3-application/aggregation-deployment.yaml | kubectl delete -f -
-echo "$APPLICATION_YAML" | kubectl delete -f -
-
-
+# Stop workload generator and app
+kubectl delete -k uc-workload-generator/overlay/uc3-workload-generator
+kubectl delete -k uc-application/overlay/uc3-application
 
 # Delete topics instead of Kafka
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
diff --git a/execution/run_uc4.sh b/execution/run_uc4.sh
index bfd3ed8e2b970b12c5835ba5bcd8ea2dace0d84b..08a38498839ef3c50a39c1ccfbd26914993ffbd3 100755
--- a/execution/run_uc4.sh
+++ b/execution/run_uc4.sh
@@ -29,39 +29,60 @@ NUM_SENSORS=$DIM_VALUE
 WL_MAX_RECORDS=150000
 WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
 
-WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc4-workload-generator/deployment.yaml)
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
+cat <<EOF >uuc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: $WL_INSTANCES
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "$NUM_SENSORS"
+        - name: INSTANCES
+          value: "$WL_INSTANCES"
+EOF
+kubectl apply -k uc-workload-generator/overlay/uc4-workload-generator
 
 # Start application
 REPLICAS=$INSTANCES
-#AGGREGATION_DURATION_DAYS=$DIM_VALUE
-# When not using `sed` anymore, use `kubectl apply -f uc4-application`
-kubectl apply -f uc4-application/aggregation-service.yaml
-kubectl apply -f uc4-application/jmx-configmap.yaml
-kubectl apply -f uc4-application/service-monitor.yaml
-#kubectl apply -f uc4-application/aggregation-deployment.yaml
-#sed "s/{{AGGREGATION_DURATION_DAYS}}/$AGGREGATION_DURATION_DAYS/g" uc4-application/aggregation-deployment.yaml | kubectl apply -f -
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc4-application/aggregation-deployment.yaml)
-echo "$APPLICATION_YAML" | kubectl apply -f -
-kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+cat <<EOF >uc-application/overlay/uc4-application/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: $REPLICAS
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+        resources:
+          limits:
+            memory: $MEMORY_LIMIT
+            cpu: $CPU_LIMIT
+EOF
+kubectl apply -k uc-application/overlay/uc4-application
+kubectl scale deployment uc4-titan-ccp-aggregation --replicas=$REPLICAS
 
 # Execute for certain time
-sleep ${EXECUTION_MINUTES}m
+sleep $(($EXECUTION_MINUTES * 60))
 
 # Run eval script
 source ../.venv/bin/activate
 python lag_analysis.py $EXP_ID uc4 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
 deactivate
 
-# Stop wl and app
-#sed "s/{{INSTANCES}}/1/g" uc4-workload-generator/deployment.yaml | kubectl delete -f -
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
-kubectl delete -f uc4-application/aggregation-service.yaml
-kubectl delete -f uc4-application/jmx-configmap.yaml
-kubectl delete -f uc4-application/service-monitor.yaml
-#kubectl delete -f uc4-application/aggregation-deployment.yaml
-echo "$APPLICATION_YAML" | kubectl delete -f -
-
+# Stop workload generator and app
+kubectl delete -k uc-workload-generator/overlay/uc4-workload-generator
+kubectl delete -k uc-application/overlay/uc4-application
 
 # Delete topics instead of Kafka
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
diff --git a/execution/uc1-application/aggregation-deployment.yaml b/execution/uc-application/base/aggregation-deployment.yaml
similarity index 87%
rename from execution/uc1-application/aggregation-deployment.yaml
rename to execution/uc-application/base/aggregation-deployment.yaml
index bcb0a955de0d5ce64fe6bdcba1e537468c833e5b..81da3eea7688f5d3b3145092d91cb8502e6ad87b 100644
--- a/execution/uc1-application/aggregation-deployment.yaml
+++ b/execution/uc-application/base/aggregation-deployment.yaml
@@ -14,24 +14,24 @@ spec:
     spec:
       terminationGracePeriodSeconds: 0
       containers:
-      - name: uc1-application
-        image: "theodolite/theodolite-uc1-kstreams-app:latest"
+      - name: uc-application
+        image: uc-app:latest
         ports:
         - containerPort: 5555
           name: jmx
         env:
+        - name: COMMIT_INTERVAL_MS
+          value: "100"
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
         - name: SCHEMA_REGISTRY_URL
           value: "http://my-confluent-cp-schema-registry:8081"
-        - name: COMMIT_INTERVAL_MS
-          value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
         - name: JAVA_OPTS
           value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
         resources:
           limits:
-            memory: "{{MEMORY_LIMIT}}"
-            cpu: "{{CPU_LIMIT}}"
+            memory: 4Gi
+            cpu: 1000m
       - name: prometheus-jmx-exporter
         image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
         command:
diff --git a/execution/uc2-application/aggregation-service.yaml b/execution/uc-application/base/aggregation-service.yaml
similarity index 86%
rename from execution/uc2-application/aggregation-service.yaml
rename to execution/uc-application/base/aggregation-service.yaml
index 85432d04f225c30469f3232153ef6bd72bd02bdf..6317caf9fe624e42449b8f630d040a068709cda3 100644
--- a/execution/uc2-application/aggregation-service.yaml
+++ b/execution/uc-application/base/aggregation-service.yaml
@@ -1,14 +1,14 @@
 apiVersion: v1
 kind: Service
-metadata:  
+metadata:
   name: titan-ccp-aggregation
   labels:
     app: titan-ccp-aggregation
 spec:
   #type: NodePort
-  selector:    
+  selector:
     app: titan-ccp-aggregation
-  ports:  
+  ports:
   - name: http
     port: 80
     targetPort: 80
diff --git a/execution/uc1-application/jmx-configmap.yaml b/execution/uc-application/base/jmx-configmap.yaml
similarity index 100%
rename from execution/uc1-application/jmx-configmap.yaml
rename to execution/uc-application/base/jmx-configmap.yaml
diff --git a/execution/uc-application/base/kustomization.yaml b/execution/uc-application/base/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..24c89cfdafb17cdc91f65198b9faf3665bfc6822
--- /dev/null
+++ b/execution/uc-application/base/kustomization.yaml
@@ -0,0 +1,12 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+commonLabels:
+  app: titan-ccp-aggregation
+
+# Use all resources to compose them into one file
+resources:
+  - aggregation-deployment.yaml
+  - aggregation-service.yaml
+  - service-monitor.yaml
+  - jmx-configmap.yaml
diff --git a/execution/uc1-application/service-monitor.yaml b/execution/uc-application/base/service-monitor.yaml
similarity index 100%
rename from execution/uc1-application/service-monitor.yaml
rename to execution/uc-application/base/service-monitor.yaml
diff --git a/execution/uc-application/overlay/uc1-application/kustomization.yaml b/execution/uc-application/overlay/uc1-application/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0d3820fe392e1d2224d78a8dd2415c4dce37c6e6
--- /dev/null
+++ b/execution/uc-application/overlay/uc1-application/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc1-
+
+images:
+  - name: uc-app
+    newName: theodolite/theodolite-uc1-kstreams-app
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the resource parameters
diff --git a/execution/uc-application/overlay/uc1-application/set_paramters.yaml b/execution/uc-application/overlay/uc1-application/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cb85048128774ab421b89338d5b1ce23791acac8
--- /dev/null
+++ b/execution/uc-application/overlay/uc1-application/set_paramters.yaml
@@ -0,0 +1,17 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "100"
+        resources:
+          limits:
+            memory: 4Gi
+            cpu: 1000m
diff --git a/execution/uc-application/overlay/uc2-application/kustomization.yaml b/execution/uc-application/overlay/uc2-application/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cd32cabf70fdfa666a5703c97bc4e4fad7800ba7
--- /dev/null
+++ b/execution/uc-application/overlay/uc2-application/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc2-
+
+images:
+  - name: uc-app
+    newName: theodolite/theodolite-uc2-kstreams-app
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the resource parameters
diff --git a/execution/uc-application/overlay/uc2-application/set_paramters.yaml b/execution/uc-application/overlay/uc2-application/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cb85048128774ab421b89338d5b1ce23791acac8
--- /dev/null
+++ b/execution/uc-application/overlay/uc2-application/set_paramters.yaml
@@ -0,0 +1,17 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "100"
+        resources:
+          limits:
+            memory: 4Gi
+            cpu: 1000m
diff --git a/execution/uc-application/overlay/uc3-application/kustomization.yaml b/execution/uc-application/overlay/uc3-application/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5722cbca8cc79247063921a55252435804edefe6
--- /dev/null
+++ b/execution/uc-application/overlay/uc3-application/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc3-
+
+images:
+  - name: uc-app
+    newName: theodolite/theodolite-uc3-kstreams-app
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the resource parameters
diff --git a/execution/uc-application/overlay/uc3-application/set_paramters.yaml b/execution/uc-application/overlay/uc3-application/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cb85048128774ab421b89338d5b1ce23791acac8
--- /dev/null
+++ b/execution/uc-application/overlay/uc3-application/set_paramters.yaml
@@ -0,0 +1,17 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "100"
+        resources:
+          limits:
+            memory: 4Gi
+            cpu: 1000m
diff --git a/execution/uc-application/overlay/uc4-application/kustomization.yaml b/execution/uc-application/overlay/uc4-application/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b44a9bb643802735b740b74bdb47299fb413e5d3
--- /dev/null
+++ b/execution/uc-application/overlay/uc4-application/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc4-
+
+images:
+  - name: uc-app
+    newName: theodolite/theodolite-uc4-kstreams-app
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the resource parameters
diff --git a/execution/uc-application/overlay/uc4-application/set_paramters.yaml b/execution/uc-application/overlay/uc4-application/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cb85048128774ab421b89338d5b1ce23791acac8
--- /dev/null
+++ b/execution/uc-application/overlay/uc4-application/set_paramters.yaml
@@ -0,0 +1,17 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "100"
+        resources:
+          limits:
+            memory: 4Gi
+            cpu: 1000m
diff --git a/execution/uc-workload-generator/base/kustomization.yaml b/execution/uc-workload-generator/base/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2a2c3de74db5afb7c70b440651b8c0c47720b755
--- /dev/null
+++ b/execution/uc-workload-generator/base/kustomization.yaml
@@ -0,0 +1,5 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+  - workloadGenerator.yaml
diff --git a/execution/uc2-workload-generator/deployment.yaml b/execution/uc-workload-generator/base/workloadGenerator.yaml
similarity index 81%
rename from execution/uc2-workload-generator/deployment.yaml
rename to execution/uc-workload-generator/base/workloadGenerator.yaml
index a7bf66f5e47a6fadfcd294366a3cfdefeaca656a..794468b18dc74ca09872577b5b3c115605bd4620 100644
--- a/execution/uc2-workload-generator/deployment.yaml
+++ b/execution/uc-workload-generator/base/workloadGenerator.yaml
@@ -6,7 +6,7 @@ spec:
   selector:
     matchLabels:
       app: titan-ccp-load-generator
-  replicas: {{INSTANCES}}
+  replicas: 1
   template:
     metadata:
       labels:
@@ -15,8 +15,15 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: workload-generator
-        image: theodolite/theodolite-uc2-workload-generator:latest
+        image: workload-generator:latest
         env:
+        # Order needs to be preserved for run_uc.py
+        - name: NUM_SENSORS
+          value: "25000"
+        - name: INSTANCES
+          value: "1"
+        - name: NUM_NESTED_GROUPS
+          value: "5"
         - name: ZK_HOST
           value: "my-confluent-cp-zookeeper"
         - name: ZK_PORT
@@ -25,13 +32,7 @@ spec:
           value: "my-confluent-cp-kafka:9092"
         - name: SCHEMA_REGISTRY_URL
           value: "http://my-confluent-cp-schema-registry:8081"
-        - name: NUM_SENSORS
-          value: "4"
-        - name: NUM_NESTED_GROUPS
-          value: "{{NUM_NESTED_GROUPS}}"
         - name: POD_NAME
           valueFrom:
             fieldRef:
               fieldPath: metadata.name
-        - name: INSTANCES
-          value: "{{INSTANCES}}"
diff --git a/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..553b769a3bacd3356d6b5af5ba2e865acdd47a7c
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc1-
+
+images:
+  - name: workload-generator
+    newName: theodolite/theodolite-uc1-workload-generator
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the resource parameters
diff --git a/execution/uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b275607c27723b1e7e5e7e2b5c02942731bed809
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml
@@ -0,0 +1,15 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "25000"
+        - name: INSTANCES
+          value: "1"
diff --git a/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ff68743355d55459f2df988e8dd42bf0b3b6ae64
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc2-
+
+images:
+  - name: workload-generator
+    newName: theodolite/theodolite-uc2-workload-generator
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the workload generator parameters
diff --git a/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..187cb4717195537288e58035dcdda5f34fc9ceed
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml
@@ -0,0 +1,19 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "4"
+        - name: HIERARCHY
+          value: "full"
+        - name: NUM_NESTED_GROUPS
+          value: "5"
+        - name: INSTANCES
+          value: "1"
diff --git a/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a7022480fcfe401f3e4e4c3898c3d79930198d3e
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc3-
+
+images:
+  - name: workload-generator
+    newName: theodolite/theodolite-uc3-workload-generator
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the workload generator parameters
diff --git a/execution/uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b275607c27723b1e7e5e7e2b5c02942731bed809
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml
@@ -0,0 +1,15 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "25000"
+        - name: INSTANCES
+          value: "1"
diff --git a/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5efb0eb25a26371cdddfcc7969a2d10131dbb448
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc4-
+
+images:
+  - name: workload-generator
+    newName: theodolite/theodolite-uc4-workload-generator
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the workload generator parameters
diff --git a/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b275607c27723b1e7e5e7e2b5c02942731bed809
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml
@@ -0,0 +1,15 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "25000"
+        - name: INSTANCES
+          value: "1"
diff --git a/execution/uc1-application/aggregation-service.yaml b/execution/uc1-application/aggregation-service.yaml
deleted file mode 100644
index 85432d04f225c30469f3232153ef6bd72bd02bdf..0000000000000000000000000000000000000000
--- a/execution/uc1-application/aggregation-service.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:  
-  name: titan-ccp-aggregation
-  labels:
-    app: titan-ccp-aggregation
-spec:
-  #type: NodePort
-  selector:    
-    app: titan-ccp-aggregation
-  ports:  
-  - name: http
-    port: 80
-    targetPort: 80
-    protocol: TCP
-  - name: metrics
-    port: 5556
diff --git a/execution/uc1-workload-generator/deployment.yaml b/execution/uc1-workload-generator/deployment.yaml
deleted file mode 100644
index e8326926e7bdb1b49be2d1c03f4a8e26ca77a2a6..0000000000000000000000000000000000000000
--- a/execution/uc1-workload-generator/deployment.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-load-generator
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-load-generator
-  replicas: {{INSTANCES}}
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-load-generator
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: workload-generator
-        image: theodolite/theodolite-uc1-workload-generator:latest
-        env:
-        - name: ZK_HOST
-          value: "my-confluent-cp-zookeeper"
-        - name: ZK_PORT
-          value: "2181"
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
-        - name: NUM_SENSORS
-          value: "{{NUM_SENSORS}}"
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: INSTANCES
-          value: "{{INSTANCES}}"
diff --git a/execution/uc2-application/aggregation-deployment.yaml b/execution/uc2-application/aggregation-deployment.yaml
deleted file mode 100644
index 3eca4749ad1decbf9b3fd1973fcad94febf355d8..0000000000000000000000000000000000000000
--- a/execution/uc2-application/aggregation-deployment.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-aggregation
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-aggregation
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-aggregation
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: uc2-application
-        image: "theodolite/theodolite-uc2-kstreams-app:latest"
-        ports:
-        - containerPort: 5555
-          name: jmx
-        env:
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
-        - name: COMMIT_INTERVAL_MS
-          value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
-        - name: JAVA_OPTS
-          value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
-        - name: LOG_LEVEL
-          value: "INFO"
-        resources:
-          limits:
-            memory: "{{MEMORY_LIMIT}}"
-            cpu: "{{CPU_LIMIT}}"
-      - name: prometheus-jmx-exporter
-        image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
-        command:
-          - java
-          - -XX:+UnlockExperimentalVMOptions
-          - -XX:+UseCGroupMemoryLimitForHeap
-          - -XX:MaxRAMFraction=1
-          - -XshowSettings:vm
-          - -jar
-          - jmx_prometheus_httpserver.jar
-          - "5556"
-          - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
-        ports:
-          - containerPort: 5556
-        volumeMounts:
-          - name: jmx-config
-            mountPath: /etc/jmx-aggregation
-      volumes:
-        - name: jmx-config
-          configMap:
-            name: aggregation-jmx-configmap
diff --git a/execution/uc2-application/jmx-configmap.yaml b/execution/uc2-application/jmx-configmap.yaml
deleted file mode 100644
index 78496a86b1242a89b9e844ead3e700fd0b9a9667..0000000000000000000000000000000000000000
--- a/execution/uc2-application/jmx-configmap.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: aggregation-jmx-configmap
-data:
-  jmx-kafka-prometheus.yml: |+
-    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
-    lowercaseOutputName: true
-    lowercaseOutputLabelNames: true
-    ssl: false
diff --git a/execution/uc2-application/service-monitor.yaml b/execution/uc2-application/service-monitor.yaml
deleted file mode 100644
index 4e7e758cacb5086305efa26292ddef2afc958096..0000000000000000000000000000000000000000
--- a/execution/uc2-application/service-monitor.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  labels:
-    app: titan-ccp-aggregation
-    appScope: titan-ccp
-  name: titan-ccp-aggregation
-spec:
-  selector:
-    matchLabels:
-        app: titan-ccp-aggregation
-  endpoints:
-    - port: metrics
-      interval: 10s
diff --git a/execution/uc3-application/aggregation-deployment.yaml b/execution/uc3-application/aggregation-deployment.yaml
deleted file mode 100644
index a535b5b6443e89564d4bb0cbe17593c60dc289dc..0000000000000000000000000000000000000000
--- a/execution/uc3-application/aggregation-deployment.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-aggregation
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-aggregation
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-aggregation
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: uc3-application
-        image: "theodolite/theodolite-uc3-kstreams-app:latest"
-        ports:
-        - containerPort: 5555
-          name: jmx
-        env:
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
-        - name: KAFKA_WINDOW_DURATION_MINUTES
-          value: "1"
-        - name: COMMIT_INTERVAL_MS
-          value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
-        - name: JAVA_OPTS
-          value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
-        resources:
-          limits:
-            memory: "{{MEMORY_LIMIT}}"
-            cpu: "{{CPU_LIMIT}}"
-      - name: prometheus-jmx-exporter
-        image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
-        command:
-          - java
-          - -XX:+UnlockExperimentalVMOptions
-          - -XX:+UseCGroupMemoryLimitForHeap
-          - -XX:MaxRAMFraction=1
-          - -XshowSettings:vm
-          - -jar
-          - jmx_prometheus_httpserver.jar
-          - "5556"
-          - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
-        ports:
-          - containerPort: 5556
-        volumeMounts:
-          - name: jmx-config
-            mountPath: /etc/jmx-aggregation
-      volumes:
-        - name: jmx-config
-          configMap:
-            name: aggregation-jmx-configmap
diff --git a/execution/uc3-application/aggregation-service.yaml b/execution/uc3-application/aggregation-service.yaml
deleted file mode 100644
index 85432d04f225c30469f3232153ef6bd72bd02bdf..0000000000000000000000000000000000000000
--- a/execution/uc3-application/aggregation-service.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:  
-  name: titan-ccp-aggregation
-  labels:
-    app: titan-ccp-aggregation
-spec:
-  #type: NodePort
-  selector:    
-    app: titan-ccp-aggregation
-  ports:  
-  - name: http
-    port: 80
-    targetPort: 80
-    protocol: TCP
-  - name: metrics
-    port: 5556
diff --git a/execution/uc3-application/jmx-configmap.yaml b/execution/uc3-application/jmx-configmap.yaml
deleted file mode 100644
index 78496a86b1242a89b9e844ead3e700fd0b9a9667..0000000000000000000000000000000000000000
--- a/execution/uc3-application/jmx-configmap.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: aggregation-jmx-configmap
-data:
-  jmx-kafka-prometheus.yml: |+
-    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
-    lowercaseOutputName: true
-    lowercaseOutputLabelNames: true
-    ssl: false
diff --git a/execution/uc3-application/service-monitor.yaml b/execution/uc3-application/service-monitor.yaml
deleted file mode 100644
index 4e7e758cacb5086305efa26292ddef2afc958096..0000000000000000000000000000000000000000
--- a/execution/uc3-application/service-monitor.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  labels:
-    app: titan-ccp-aggregation
-    appScope: titan-ccp
-  name: titan-ccp-aggregation
-spec:
-  selector:
-    matchLabels:
-        app: titan-ccp-aggregation
-  endpoints:
-    - port: metrics
-      interval: 10s
diff --git a/execution/uc3-workload-generator/deployment.yaml b/execution/uc3-workload-generator/deployment.yaml
deleted file mode 100644
index d323fd089eeaa4542db5a645fb3b08885b8eff26..0000000000000000000000000000000000000000
--- a/execution/uc3-workload-generator/deployment.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-load-generator
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-load-generator
-  replicas: {{INSTANCES}}
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-load-generator
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: workload-generator
-        image: theodolite/theodolite-uc3-workload-generator:latest
-        env:
-        - name: ZK_HOST
-          value: "my-confluent-cp-zookeeper"
-        - name: ZK_PORT
-          value: "2181"
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
-        - name: NUM_SENSORS
-          value: "{{NUM_SENSORS}}"
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: INSTANCES
-          value: "{{INSTANCES}}"
diff --git a/execution/uc4-application/aggregation-deployment.yaml b/execution/uc4-application/aggregation-deployment.yaml
deleted file mode 100644
index 5f71737046e12b7f0116d59c4b55f0c0de39bbd2..0000000000000000000000000000000000000000
--- a/execution/uc4-application/aggregation-deployment.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-aggregation
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-aggregation
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-aggregation
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: uc4-application
-        image: "theodolite/theodolite-uc4-kstreams-app:latest"
-        ports:
-        - containerPort: 5555
-          name: jmx
-        env:
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
-        - name: AGGREGATION_DURATION_DAYS
-          value: "3" #AGGREGATION_DURATION_DAYS
-        - name: AGGREGATION_DURATION_ADVANCE
-          value: "1"
-        - name: COMMIT_INTERVAL_MS
-          value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
-        - name: JAVA_OPTS
-          value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
-        resources:
-          limits:
-            memory: "{{MEMORY_LIMIT}}"
-            cpu: "{{CPU_LIMIT}}"
-      - name: prometheus-jmx-exporter
-        image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
-        command:
-          - java
-          - -XX:+UnlockExperimentalVMOptions
-          - -XX:+UseCGroupMemoryLimitForHeap
-          - -XX:MaxRAMFraction=1
-          - -XshowSettings:vm
-          - -jar
-          - jmx_prometheus_httpserver.jar
-          - "5556"
-          - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
-        ports:
-          - containerPort: 5556
-        volumeMounts:
-          - name: jmx-config
-            mountPath: /etc/jmx-aggregation
-      volumes:
-        - name: jmx-config
-          configMap:
-            name: aggregation-jmx-configmap
diff --git a/execution/uc4-application/aggregation-service.yaml b/execution/uc4-application/aggregation-service.yaml
deleted file mode 100644
index 85432d04f225c30469f3232153ef6bd72bd02bdf..0000000000000000000000000000000000000000
--- a/execution/uc4-application/aggregation-service.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:  
-  name: titan-ccp-aggregation
-  labels:
-    app: titan-ccp-aggregation
-spec:
-  #type: NodePort
-  selector:    
-    app: titan-ccp-aggregation
-  ports:  
-  - name: http
-    port: 80
-    targetPort: 80
-    protocol: TCP
-  - name: metrics
-    port: 5556
diff --git a/execution/uc4-application/jmx-configmap.yaml b/execution/uc4-application/jmx-configmap.yaml
deleted file mode 100644
index 78496a86b1242a89b9e844ead3e700fd0b9a9667..0000000000000000000000000000000000000000
--- a/execution/uc4-application/jmx-configmap.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: aggregation-jmx-configmap
-data:
-  jmx-kafka-prometheus.yml: |+
-    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
-    lowercaseOutputName: true
-    lowercaseOutputLabelNames: true
-    ssl: false
diff --git a/execution/uc4-application/service-monitor.yaml b/execution/uc4-application/service-monitor.yaml
deleted file mode 100644
index 4e7e758cacb5086305efa26292ddef2afc958096..0000000000000000000000000000000000000000
--- a/execution/uc4-application/service-monitor.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  labels:
-    app: titan-ccp-aggregation
-    appScope: titan-ccp
-  name: titan-ccp-aggregation
-spec:
-  selector:
-    matchLabels:
-        app: titan-ccp-aggregation
-  endpoints:
-    - port: metrics
-      interval: 10s
diff --git a/execution/uc4-workload-generator/deployment.yaml b/execution/uc4-workload-generator/deployment.yaml
deleted file mode 100644
index 98747b3922d439144e783b0e637cbe68e46f1b88..0000000000000000000000000000000000000000
--- a/execution/uc4-workload-generator/deployment.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-load-generator
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-load-generator
-  replicas: {{INSTANCES}}
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-load-generator
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: workload-generator
-        image: theodolite/theodolite-uc4-workload-generator:latest
-        env:
-        - name: ZK_HOST
-          value: "my-confluent-cp-zookeeper"
-        - name: ZK_PORT
-          value: "2181"
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
-        - name: NUM_SENSORS
-          value: "{{NUM_SENSORS}}"
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: INSTANCES
-          value: "{{INSTANCES}}"