diff --git a/.gitignore b/.gitignore
index 36f08fd69890d38fafe2b8bbf40d830773e737e0..71305e60a1056e58f281da4c2ab397539b63ba52 100644
--- a/.gitignore
+++ b/.gitignore
@@ -30,3 +30,6 @@ tmp/
 
 # Python Venv
 .venv
+
+# Python cache files
+*.pyc
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index f7e431002e7bf214f377b7458d2eba235b7b6050..0dda0bdb6be4434c91801cb6665364fb7fd63d6a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,247 +1,32 @@
-image: openjdk:11-jdk
-
-# Disable the Gradle daemon for Continuous Integration servers as correctness
-# is usually a priority over speed in CI environments. Using a fresh
-# runtime for each build is more reliable since the runtime is completely
-# isolated from any previous builds.
-variables:
-  GRADLE_OPTS: "-Dorg.gradle.daemon=false"
-
-cache:
-  paths:
-    - .gradle
-
-before_script:
-  - export GRADLE_USER_HOME=`pwd`/.gradle
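+# Run branch pipelines only; merge request pipelines are never created.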
+workflow:
+  rules:
+    - if: $CI_MERGE_REQUEST_ID
+      when: never
+    - when: always
 
 stages:
-  - build
-  - test
-  - check
-  - deploy
-
-build:
-  stage: build
-  tags:
-    - exec-docker
-  script: ./gradlew --build-cache assemble
-  artifacts:
-    paths:
-      - "build/libs/*.jar"
-      - "*/build/distributions/*.tar"
-    expire_in: 1 day
-
-test:
-  stage: test
-  tags:
-    - exec-docker
-  script: ./gradlew test --continue
-  artifacts:
-    reports:
-      junit:
-        - "**/build/test-results/test/TEST-*.xml"
-
-checkstyle:
-  stage: check
-  tags:
-    - exec-docker
-  script: ./gradlew checkstyle --continue
-  allow_failure: true
-  artifacts:
-    paths:
-      - "*/build/reports/checkstyle/main.html"
-    when: on_failure
-    expire_in: 1 day
-
-pmd:
-  stage: check
-  tags:
-    - exec-docker
-  script: ./gradlew pmd --continue
-  allow_failure: true
-  artifacts:
-    paths:
-      - "*/build/reports/pmd/*.html"
-    when: on_failure
-    expire_in: 1 day
+  - triggers
 
-spotbugs:
-  stage: check
-  tags:
-    - exec-docker
-  script: ./gradlew spotbugs --continue
-  allow_failure: true
-  artifacts:
-    paths:
-      - "*/build/reports/spotbugs/*.html"
-    when: on_failure
-    expire_in: 1 day
-
-
-.deploy:
-  stage: deploy
-  tags:
-    - exec-dind
-  # see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled
-  # for image usage and settings for building with TLS and docker in docker
-  image: docker:19.03.1
-  services:
-    - docker:19.03.1-dind
-  variables:
-    DOCKER_TLS_CERTDIR: "/certs"
-  script:
-    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
-    - docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME
-    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest"
-    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
-    - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$CI_COMMIT_TAG"
-    - echo $DOCKERHUB_PW | docker login -u $DOCKERHUB_ID --password-stdin
-    - docker push $DOCKERHUB_ORG/$IMAGE_NAME
-    - docker logout
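+# Trigger the child pipeline defined in benchmarks/.gitlab-ci.yml;
+# 'strategy: depend' makes this job mirror the child pipeline's status.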
+benchmarks:
+  stage: triggers
+  trigger:
+    include: benchmarks/.gitlab-ci.yml
+    strategy: depend
   rules:
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      # - $JAVA_PROJECT_NAME/**/* # hope this can be simplified soon, see #51
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc1-kstreams-app:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc1-kstreams-app"
-    JAVA_PROJECT_NAME: "uc1-application"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc1-application/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc2-kstreams-app:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc2-kstreams-app"
-    JAVA_PROJECT_NAME: "uc2-application"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
+    - if: "$CI_COMMIT_TAG"
     - changes:
-      - uc2-application/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
+      - benchmarks/*
+    - when: manual
       allow_failure: true
 
-deploy-uc3-kstreams-app:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc3-kstreams-app"
-    JAVA_PROJECT_NAME: "uc3-application"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc3-application/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc4-kstreams-app:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc4-kstreams-app"
-    JAVA_PROJECT_NAME: "uc4-application"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc4-application/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc1-workload-generator:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc1-workload-generator"
-    JAVA_PROJECT_NAME: "uc1-workload-generator"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc1-workload-generator/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc2-workload-generator:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc2-workload-generator"
-    JAVA_PROJECT_NAME: "uc2-workload-generator"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc2-workload-generator/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc3-workload-generator:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc3-workload-generator"
-    JAVA_PROJECT_NAME: "uc3-workload-generator"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc3-workload-generator/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc4-workload-generator:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc4-workload-generator"
-    JAVA_PROJECT_NAME: "uc4-workload-generator"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
+execution:
+  stage: triggers
+  trigger:
+    include: execution/.gitlab-ci.yml
+    strategy: depend
+  rules:
+    - if: "$CI_COMMIT_TAG"
     - changes:
-      - uc4-workload-generator/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
+      - execution/*
+    - when: manual
       allow_failure: true
diff --git a/README.md b/README.md
index 6ad1dd576bc165630fb378234102f324f9b66d8a..b1011530b67dad11da2e59e3decd400186f3ed5c 100644
--- a/README.md
+++ b/README.md
@@ -6,14 +6,14 @@ Theodolite is a framework for benchmarking the horizontal and vertical scalabili
 
 ## Theodolite Benchmarks
 
-Theodolite contains 4 application benchmarks, which are based on typical use cases for stream processing within microservices. For each benchmark, a corresponding workload generator is provided. Currently, this repository provides benchmark implementations for Kafka Streams.
+Theodolite contains 4 application benchmarks, which are based on typical use cases for stream processing within microservices. For each benchmark, a corresponding workload generator is provided. Currently, this repository provides benchmark implementations for Apache Kafka Streams. Benchmark implementations for Apache Flink are currently under development and can be found in the *apache-flink* branch of this repository. The benchmark sources can be found in [Theodolite benchmarks](benchmarks).
 
 
 ## Theodolite Execution Framework
 
-Theodolite aims to benchmark scalability of stream processing engines for real use cases. Microservices that apply stream processing techniques are usually deployed in elastic cloud environments. Hence, Theodolite's cloud-native benchmarking framework deploys as components in a cloud environment, orchestrated by Kubernetes. More information on how to execute scalability benchmarks can be found in [Thedolite execution framework](execution).
+Theodolite aims to benchmark scalability of stream processing engines for real use cases. Microservices that apply stream processing techniques are usually deployed in elastic cloud environments. Hence, Theodolite's cloud-native benchmarking framework deploys its components in a cloud environment, orchestrated by Kubernetes. More information on how to execute scalability benchmarks can be found in [Theodolite execution framework](execution).
 
 
 ## Theodolite Analysis Tools
 
-Theodolite's benchmarking method create a *scalability graph* allowing to draw conclusions about the scalability of a stream processing engine or its deployment. A scalability graph shows how resource demand evolves with an increasing workload. Theodolite provides Jupyter notebooks for creating such scalability graphs based on benchmarking results from the execution framework. More information can be found in [Theodolite analysis tool](analysis).
+Theodolite's benchmarking method creates a *scalability graph* allowing one to draw conclusions about the scalability of a stream processing engine or its deployment. A scalability graph shows how resource demand evolves with an increasing workload. Theodolite provides Jupyter notebooks for creating such scalability graphs based on benchmarking results from the execution framework. More information can be found in [Theodolite analysis tool](analysis).
diff --git a/analysis/.dockerignore b/analysis/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..9a715f53b8129933fe1b20baa4af20772de3c872
--- /dev/null
+++ b/analysis/.dockerignore
@@ -0,0 +1,2 @@
+.dockerignore
+Dockerfile
\ No newline at end of file
diff --git a/analysis/Dockerfile b/analysis/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..1e396697f34f86e578890cbb68b7a8d40a21ebf8
--- /dev/null
+++ b/analysis/Dockerfile
@@ -0,0 +1,7 @@
+FROM jupyter/base-notebook
+
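+# Copy the notebooks, analysis sources, and requirements into the notebook user's home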
+COPY . /home/jovyan
+
+WORKDIR /home/jovyan
+RUN rm -r work
+RUN pip install -r requirements.txt
diff --git a/analysis/README.md b/analysis/README.md
index 263b1db16fcabefe5409ebe744afe5997bc90d89..8d37f01c011e74bf258e2d411bc72f32f0ddcfdc 100644
--- a/analysis/README.md
+++ b/analysis/README.md
@@ -3,20 +3,44 @@
 This directory contains Jupyter notebooks for analyzing and visualizing
 benchmark execution results and plotting. The following notebooks are provided:
 
+* [demand-metric.ipynb](demand-metric.ipynb): Create CSV files describing scalability according to the Theodolite `demand` metric.
+* [demand-metric-plot.ipynb](demand-metric-plot.ipynb): Create plots based on such `demand` metric CSV files.
+
+For legacy reasons, we also provide the following notebooks, which, however, are not documented:
+
 * [scalability-graph.ipynb](scalability-graph.ipynb): Creates a scalability graph for a certain benchmark execution.
-* [scalability-graph-final.ipynb](scalability-graph-final.ipynb): Combines the scalability graphs of multiple benchmarks executions (e.g. for comparing different configuration).
+* [scalability-graph-plotter.ipynb](scalability-graph-plotter.ipynb): Combines the scalability graphs of multiple benchmark executions (e.g., for comparing different configurations).
 * [lag-trend-graph.ipynb](lag-trend-graph.ipynb): Visualizes the consumer lag evaluation over time along with the computed trend.
 
 ## Usage
 
-For executing benchmarks and analyzing their results, a **Python 3.7**
-installation is required (e.g., in a virtual environment). Our notebooks require some
-Python libraries, which can be installed via:
+In general, the Theodolite Analysis Jupyter notebooks should be runnable by any Jupyter server. To make it a bit easier,
+we provide introductions for running notebooks with Docker and with Visual Studio Code. These introductions may also be
+a good starting point for using other services.
+
+For analyzing and visualizing benchmark results, either Docker or a Jupyter installation with Python 3.7 or 3.8 is
+required (e.g., in a virtual environment). **Please note that Python 3.9 does not seem to work yet, as not all of our
+dependencies have been ported to Python 3.9.**
+
+### Running with Docker
+
+This option requires Docker to be installed. You can build and run a container using the following commands. Make sure
+to set the `results` volume to the directory with your execution results and `results-inst` to a directory where the
+final scalability graphs should be placed. The output of the *run* command gives you a URL of the form
+`http://127.0.0.1:8888/?token=...`, which you should open in your web browser. From there you can access all notebooks.
+You can stop the Jupyter server with Ctrl + C.
 
 ```sh
-pip install -r requirements.txt 
+docker build . -t theodolite-analysis
+docker run --rm -p 8888:8888 -v "$PWD/../results":/home/jovyan/results -v "$PWD/../results-inst":/home/jovyan/results-inst theodolite-analysis
 ```
 
-We have tested these
-notebooks with [Visual Studio Code](https://code.visualstudio.com/docs/python/jupyter-support),
-however, every other server should be fine as well.
+### Running with Visual Studio Code
+
+The [Visual Studio Code Documentation](https://code.visualstudio.com/docs/python/jupyter-support) shows how to run
+Jupyter notebooks with Visual Studio Code. For our notebooks, Python 3.7 or newer is required (e.g., in a virtual
+environment). Moreover, they require some Python libraries, which can be installed via:
+
+```sh
+pip install -r requirements.txt
+```
\ No newline at end of file
diff --git a/analysis/demand-metric-plot.ipynb b/analysis/demand-metric-plot.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..985d1fc91caec847f1795234903d1cbb34e3ddba
--- /dev/null
+++ b/analysis/demand-metric-plot.ipynb
@@ -0,0 +1,173 @@
+{
+ "cells": [
+  {
+   "source": [
+    "# Theodolite Analysis - Plotting the Demand Metric\n",
+    "\n",
+    "This notebook creates a plot, showing scalability as a function that maps load intensities to the resources required for processing them. It is able to combine multiple such plots in one figure, for example, to compare multiple systems or configurations.\n",
+    "\n",
+    "The notebook takes a CSV file for each plot mapping load intensities to minimum required resources, computed by the `demand-metric-plot.ipynb` notebook."
+   ],
+   "cell_type": "markdown",
+   "metadata": {}
+  },
+  {
+   "source": [
+    "First, we need to import some libraries, which are required for creating the plots."
+   ],
+   "cell_type": "markdown",
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import pandas as pd\n",
+    "from functools import reduce\n",
+    "import matplotlib.pyplot as plt\n",
+    "from matplotlib.ticker import FuncFormatter\n",
+    "from matplotlib.ticker import MaxNLocator"
+   ]
+  },
+  {
+   "source": [
+    "We need to specify the directory, where the demand CSV files can be found, and a dictionary that maps a system description (e.g. its name) to the corresponding CSV file (prefix). To use Unicode narrow non-breaking spaces in the description format it as `u\"1000\\u202FmCPU\"`."
+   ],
+   "cell_type": "markdown",
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "results_dir = '<path-to>/results'\n",
+    "\n",
+    "experiments = {\n",
+    "    'System XYZ': 'exp200',\n",
+    "}\n"
+   ]
+  },
+  {
+   "source": [
+    "Now, we combie all systems described in `experiments`."
+   ],
+   "cell_type": "markdown",
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "dataframes = [pd.read_csv(os.path.join(results_dir, f'{v}_demand.csv')).set_index('load').rename(columns={\"resources\": k}) for k, v in experiments.items()]\n",
+    "\n",
+    "df = reduce(lambda df1,df2: df1.join(df2,how='outer'), dataframes)"
+   ]
+  },
+  {
+   "source": [
+    "We might want to display the mappings before we plot it."
+   ],
+   "cell_type": "markdown",
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "df"
+   ]
+  },
+  {
+   "source": [
+    "The following code creates a MatPlotLib figure showing the scalability plots for all specified systems. You might want to adjust its styling etc. according to your preferences. Make sure to also set a filename."
+   ],
+   "cell_type": "markdown",
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plt.style.use('ggplot')\n",
+    "plt.rcParams['axes.facecolor']='w'\n",
+    "plt.rcParams['axes.edgecolor']='555555'\n",
+    "#plt.rcParams['ytick.color']='black'\n",
+    "plt.rcParams['grid.color']='dddddd'\n",
+    "plt.rcParams['axes.spines.top']='false'\n",
+    "plt.rcParams['axes.spines.right']='false'\n",
+    "plt.rcParams['legend.frameon']='true'\n",
+    "plt.rcParams['legend.framealpha']='1'\n",
+    "plt.rcParams['legend.edgecolor']='1'\n",
+    "plt.rcParams['legend.borderpad']='1'\n",
+    "\n",
+    "@FuncFormatter\n",
+    "def load_formatter(x, pos):\n",
+    "    return f'{(x/1000):.0f}k'\n",
+    "\n",
+    "markers = ['s', 'D', 'o', 'v', '^', '<', '>', 'p', 'X']\n",
+    "\n",
+    "def splitSerToArr(ser):\n",
+    "    return [ser.index, ser.as_matrix()]\n",
+    "\n",
+    "plt.figure()\n",
+    "#plt.figure(figsize=(4.8, 3.6)) # For other plot sizes\n",
+    "#ax = df.plot(kind='line', marker='o')\n",
+    "for i, column in enumerate(df):\n",
+    "    plt.plot(df[column].dropna(), marker=markers[i], label=column)\n",
+    "plt.legend()\n",
+    "ax = plt.gca()\n",
+    "#ax = df.plot(kind='line',x='dim_value', legend=False, use_index=True)\n",
+    "ax.set_ylabel('number of instances')\n",
+    "ax.set_xlabel('messages/second')\n",
+    "ax.set_ylim(ymin=0)\n",
+    "#ax.set_xlim(xmin=0)\n",
+    "ax.yaxis.set_major_locator(MaxNLocator(integer=True))\n",
+    "ax.xaxis.set_major_formatter(FuncFormatter(load_formatter))\n",
+    "\n",
+    "plt.savefig('temp.pdf', bbox_inches='tight')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python",
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "version": "3.8.5-final"
+  },
+  "orig_nbformat": 2,
+  "file_extension": ".py",
+  "mimetype": "text/x-python",
+  "name": "python",
+  "npconvert_exporter": "python",
+  "pygments_lexer": "ipython3",
+  "version": 3,
+  "kernelspec": {
+   "name": "python37064bitvenvvenv6c432ee1239d4f3cb23f871068b0267d",
+   "display_name": "Python 3.7.0 64-bit ('.venv': venv)",
+   "language": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
\ No newline at end of file
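For reference, a minimal sketch (with hypothetical values) of the demand CSV format this notebook consumes; one such file is written per experiment by `demand-metric.ipynb`:

```python
# Hypothetical 'exp200_demand.csv' contents: one row per tested load
# intensity with the minimum number of instances that met the SLO.
import pandas as pd

demand = pd.DataFrame({
    'load': [50000, 100000, 150000],   # messages/second
    'resources': [1, 2, 4],            # minimum required instances
})
demand.to_csv('exp200_demand.csv', index=False)
```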
diff --git a/analysis/demand-metric.ipynb b/analysis/demand-metric.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..bcea129b7cb07465fa99f32b6f8b2b6115e8a0aa
--- /dev/null
+++ b/analysis/demand-metric.ipynb
@@ -0,0 +1,119 @@
+{
+ "cells": [
+  {
+   "source": [
+    "# Theodolite Analysis - Demand Metric\n",
+    "\n",
+    "This notebook applies Theodolite's *demand* metric to describe scalability of a SUT based on Theodolite measurement data.\n",
+    "\n",
+    "Theodolite's *demand* metric is a function, mapping load intensities to the minimum required resources (e.g., instances) that are required to process this load. With this notebook, the *demand* metric function is approximated by a map of tested load intensities to their minimum required resources.\n",
+    "\n",
+    "The final output when running this notebook will be a CSV file, providig this mapping. It can be used to create nice plots of a system's scalability using the `demand-metric-plot.ipynb` notebook."
+   ],
+   "cell_type": "markdown",
+   "metadata": {}
+  },
+  {
+   "source": [
+    "In the following cell, we need to specifiy:\n",
+    "\n",
+    "* `exp_id`: The experiment id  that is to be analyzed.\n",
+    "* `warmup_sec`: The number of seconds which are to be ignored in the beginning of each experiment.\n",
+    "* `max_lag_trend_slope`: The maximum tolerable increase in queued messages per second.\n",
+    "* `measurement_dir`: The directory where the measurement data files are to be found.\n",
+    "* `results_dir`: The directory where the computed demand CSV files are to be stored."
+   ],
+   "cell_type": "markdown",
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "exp_id = 200\n",
+    "warmup_sec = 60\n",
+    "max_lag_trend_slope = 2000\n",
+    "measurement_dir = '<path-to>/measurements'\n",
+    "results_dir = '<path-to>/results'\n"
+   ]
+  },
+  {
+   "source": [
+    "With the following call, we compute our demand mapping."
+   ],
+   "cell_type": "markdown",
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from src.demand import demand\n",
+    "\n",
+    "demand = demand(exp_id, measurement_dir, max_lag_trend_slope, warmup_sec)"
+   ]
+  },
+  {
+   "source": [
+    "We might already want to plot a simple visualization here:"
+   ],
+   "cell_type": "markdown",
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "demand.plot(kind='line',x='load',y='resources')"
+   ]
+  },
+  {
+   "source": [
+    "Finally we store the results in a CSV file."
+   ],
+   "cell_type": "markdown",
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "\n",
+    "demand.to_csv(os.path.join(results_dir, f'exp{exp_id}_demand.csv'), index=False)"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python",
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "version": "3.8.5-final"
+  },
+  "orig_nbformat": 2,
+  "file_extension": ".py",
+  "mimetype": "text/x-python",
+  "name": "python",
+  "npconvert_exporter": "python",
+  "pygments_lexer": "ipython3",
+  "version": 3,
+  "kernelspec": {
+   "name": "python37064bitvenvvenv6c432ee1239d4f3cb23f871068b0267d",
+   "display_name": "Python 3.7.0 64-bit ('.venv': venv)",
+   "language": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
\ No newline at end of file
diff --git a/analysis/scalability-graph-finish.ipynb b/analysis/scalability-graph-plotter.ipynb
similarity index 100%
rename from analysis/scalability-graph-finish.ipynb
rename to analysis/scalability-graph-plotter.ipynb
diff --git a/analysis/scalability-graph.ipynb b/analysis/scalability-graph.ipynb
index 868f950dfea091b8fd6dbc78dc4b7471086c8947..8e4b3bd99ef032b75826535eaebd2b435ccf0881 100644
--- a/analysis/scalability-graph.ipynb
+++ b/analysis/scalability-graph.ipynb
@@ -245,7 +245,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "min_suitable_instances.to_csv(os.path.join(directory_out, f'../results-inst/exp{exp_id}_min-suitable-instances.csv'), index=False)"
+    "min_suitable_instances.to_csv(os.path.join(directory_out, f'exp{exp_id}_min-suitable-instances.csv'), index=False)"
    ]
   },
   {
diff --git a/analysis/src/demand.py b/analysis/src/demand.py
new file mode 100644
index 0000000000000000000000000000000000000000..dfb20c05af8e9a134eedd2cdb584c961a82369f5
--- /dev/null
+++ b/analysis/src/demand.py
@@ -0,0 +1,59 @@
+import os
+from datetime import datetime, timedelta, timezone
+import pandas as pd
+from sklearn.linear_model import LinearRegression
+
+def demand(exp_id, directory, threshold, warmup_sec):
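+    """Compute the Theodolite demand metric from measured lag data.
+
+    Scans `directory` for files named 'exp{exp_id}_..._totallag.csv', fits a
+    linear regression to each run's consumer lag after `warmup_sec` seconds,
+    and returns a DataFrame mapping each load intensity to the minimum number
+    of instances whose lag trend slope stays below `threshold`.
+    """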
+    raw_runs = []
+
+    # Compute the lag trend slope (the SLO indicator) for each tested configuration
+    filenames = [filename for filename in os.listdir(directory) if filename.startswith(f"exp{exp_id}") and filename.endswith("totallag.csv")]
+    for filename in filenames:
+        run_params = filename[:-4].split("_")
+        dim_value = run_params[2]  # load intensity of this run
+        instances = run_params[3]  # number of instances used in this run
+
+        run_data = pd.read_csv(os.path.join(directory, filename))
+        # run_data = run_data.loc[run_data['topic'] == "input"]  # optionally restrict to the input topic
+
+        # Compute the elapsed seconds since the start of the run and discard the warm-up period
+        run_data['sec_start'] = run_data['timestamp'] - run_data.iloc[0]['timestamp']
+        regress = run_data.loc[run_data['sec_start'] >= warmup_sec]
+
+        X = regress.iloc[:, 2].values.reshape(-1, 1)  # .values converts the column into a NumPy array
+        Y = regress.iloc[:, 3].values.reshape(-1, 1)  # -1 lets NumPy infer the number of rows, with one column
+        linear_regressor = LinearRegression()
+        linear_regressor.fit(X, Y)
+
+        trend_slope = linear_regressor.coef_[0][0]
+
+        row = {'load': int(dim_value), 'resources': int(instances), 'trend_slope': trend_slope}
+        raw_runs.append(row)
+
+    runs = pd.DataFrame(raw_runs)
+
+    # Set suitable = True if the SLO is met, i.e., the lag trend is below the threshold
+    runs["suitable"] = runs['trend_slope'] < threshold
+
+    # Sort the results table by load and resources
+    runs = runs.sort_values(by=["load", "resources"])
+
+    # Filter only suitable configurations
+    filtered = runs[runs['suitable']]
+
+    # Compute demand per load intensity
+    grouped = filtered.groupby(['load'])['resources'].min()
+    demand_per_load = grouped.to_frame().reset_index()
+
+    return demand_per_load
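A minimal usage sketch for `demand()` outside the notebooks; the placeholder path and parameter values mirror those in `demand-metric.ipynb`:

```python
# Sketch: compute the demand mapping for experiment 200, tolerating a lag
# trend slope of up to 2000 queued messages per second and ignoring the
# first 60 seconds of each run as warm-up.
from src.demand import demand

demand_per_load = demand(200, '<path-to>/measurements', 2000, 60)
print(demand_per_load)
```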
diff --git a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java b/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
deleted file mode 100644
index 8c758c24444ea9c590c364063a397f9b7bfec8f9..0000000000000000000000000000000000000000
--- a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
+++ /dev/null
@@ -1,156 +0,0 @@
-package theodolite.commons.kafkastreams;
-
-import java.util.Objects;
-import java.util.Properties;
-import org.apache.kafka.streams.KafkaStreams;
-import org.apache.kafka.streams.StreamsConfig;
-import org.apache.kafka.streams.Topology;
-import titan.ccp.common.kafka.streams.PropertiesBuilder;
-
-/**
- * Builder for the Kafka Streams configuration.
- */
-public abstract class KafkaStreamsBuilder {
-
-  // Kafkastreams application specific
-  protected String schemaRegistryUrl; // NOPMD for use in subclass
-
-  private String applicationName; // NOPMD
-  private String applicationVersion; // NOPMD
-  private String bootstrapServers; // NOPMD
-  private int numThreads = -1; // NOPMD
-  private int commitIntervalMs = -1; // NOPMD
-  private int cacheMaxBytesBuff = -1; // NOPMD
-
-  /**
-   * Sets the application name for the {@code KafkaStreams} application. It is used to create the
-   * application ID.
-   *
-   * @param applicationName Name of the application.
-   * @return
-   */
-  public KafkaStreamsBuilder applicationName(final String applicationName) {
-    this.applicationName = applicationName;
-    return this;
-  }
-
-  /**
-   * Sets the application version for the {@code KafkaStreams} application. It is used to create the
-   * application ID.
-   *
-   * @param applicationVersion Version of the application.
-   * @return
-   */
-  public KafkaStreamsBuilder applicationVersion(final String applicationVersion) {
-    this.applicationVersion = applicationVersion;
-    return this;
-  }
-
-  /**
-   * Sets the bootstrap servers for the {@code KafkaStreams} application.
-   *
-   * @param bootstrapServers String for a bootstrap server.
-   * @return
-   */
-  public KafkaStreamsBuilder bootstrapServers(final String bootstrapServers) {
-    this.bootstrapServers = bootstrapServers;
-    return this;
-  }
-
-  /**
-   * Sets the URL for the schema registry.
-   *
-   * @param url The URL of the schema registry.
-   * @return
-   */
-  public KafkaStreamsBuilder schemaRegistry(final String url) {
-    this.schemaRegistryUrl = url;
-    return this;
-  }
-
-  /**
-   * Sets the Kafka Streams property for the number of threads (num.stream.threads). Can be minus
-   * one for using the default.
-   *
-   * @param numThreads Number of threads. -1 for using the default.
-   * @return
-   */
-  public KafkaStreamsBuilder numThreads(final int numThreads) {
-    if (numThreads < -1 || numThreads == 0) {
-      throw new IllegalArgumentException("Number of threads must be greater 0 or -1.");
-    }
-    this.numThreads = numThreads;
-    return this;
-  }
-
-  /**
-   * Sets the Kafka Streams property for the frequency with which to save the position (offsets in
-   * source topics) of tasks (commit.interval.ms). Must be zero for processing all record, for
-   * example, when processing bulks of records. Can be minus one for using the default.
-   *
-   * @param commitIntervalMs Frequency with which to save the position of tasks. In ms, -1 for using
-   *        the default.
-   * @return
-   */
-  public KafkaStreamsBuilder commitIntervalMs(final int commitIntervalMs) {
-    if (commitIntervalMs < -1) {
-      throw new IllegalArgumentException("Commit interval must be greater or equal -1.");
-    }
-    this.commitIntervalMs = commitIntervalMs;
-    return this;
-  }
-
-  /**
-   * Sets the Kafka Streams property for maximum number of memory bytes to be used for record caches
-   * across all threads (cache.max.bytes.buffering). Must be zero for processing all record, for
-   * example, when processing bulks of records. Can be minus one for using the default.
-   *
-   * @param cacheMaxBytesBuffering Number of memory bytes to be used for record caches across all
-   *        threads. -1 for using the default.
-   * @return
-   */
-  public KafkaStreamsBuilder cacheMaxBytesBuffering(final int cacheMaxBytesBuffering) {
-    if (cacheMaxBytesBuffering < -1) {
-      throw new IllegalArgumentException("Cache max bytes buffering must be greater or equal -1.");
-    }
-    this.cacheMaxBytesBuff = cacheMaxBytesBuffering;
-    return this;
-  }
-
-  /**
-   * Method to implement a {@link Topology} for a {@code KafkaStreams} application.
-   *
-   * @return A {@code Topology} for a {@code KafkaStreams} application.
-   */
-  protected abstract Topology buildTopology();
-
-  /**
-   * Build the {@link Properties} for a {@code KafkaStreams} application.
-   *
-   * @return A {@code Properties} object.
-   */
-  protected Properties buildProperties() {
-    return PropertiesBuilder
-        .bootstrapServers(this.bootstrapServers)
-        .applicationId(this.applicationName + '-' + this.applicationVersion)
-        .set(StreamsConfig.NUM_STREAM_THREADS_CONFIG, this.numThreads, p -> p > 0)
-        .set(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, this.commitIntervalMs, p -> p >= 0)
-        .set(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, this.cacheMaxBytesBuff, p -> p >= 0)
-        .build();
-  }
-
-  /**
-   * Builds the {@link KafkaStreams} instance.
-   */
-  public KafkaStreams build() {
-    // Check for required attributes for building properties.
-    Objects.requireNonNull(this.applicationName, "Application name has not been set.");
-    Objects.requireNonNull(this.applicationVersion, "Application version has not been set.");
-    Objects.requireNonNull(this.bootstrapServers, "Bootstrap server has not been set.");
-    Objects.requireNonNull(this.schemaRegistryUrl, "Schema registry has not been set.");
-
-    // Create the Kafka streams instance.
-    return new KafkaStreams(this.buildTopology(), this.buildProperties());
-  }
-
-}
diff --git a/benchmarks/.gitlab-ci.yml b/benchmarks/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1a4d354701459d4730dab398e0210ab9189d7ad3
--- /dev/null
+++ b/benchmarks/.gitlab-ci.yml
@@ -0,0 +1,414 @@
+image: openjdk:11-jdk
+
+# Disable the Gradle daemon for Continuous Integration servers as correctness
+# is usually a priority over speed in CI environments. Using a fresh
+# runtime for each build is more reliable since the runtime is completely
+# isolated from any previous builds.
+variables:
+  GRADLE_OPTS: "-Dorg.gradle.daemon=false"
+
+cache:
+  paths:
+    - .gradle
+
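+# All jobs run their Gradle commands from the benchmarks/ subdirectory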
+before_script:
+  - cd benchmarks
+  - export GRADLE_USER_HOME=`pwd`/.gradle
+
+stages:
+  - build
+  - test
+  - check
+  - deploy
+
+build:
+  stage: build
+  tags:
+    - exec-docker
+  script: ./gradlew --build-cache assemble
+  artifacts:
+    paths:
+      - "benchmarks/build/libs/*.jar"
+      - "benchmarks/*/build/distributions/*.tar"
+    expire_in: 1 day
+
+test:
+  stage: test
+  tags:
+    - exec-docker
+  script: ./gradlew test --continue
+  artifacts:
+    reports:
+      junit:
+        - "benchmarks/**/build/test-results/test/TEST-*.xml"
+
+checkstyle:
+  stage: check
+  tags:
+    - exec-docker
+  script: ./gradlew checkstyle --continue
+  artifacts:
+    paths:
+      - "benchmarks/*/build/reports/checkstyle/main.html"
+    when: on_failure
+    expire_in: 1 day
+
+pmd:
+  stage: check
+  tags:
+    - exec-docker
+  script: ./gradlew pmd --continue
+  artifacts:
+    paths:
+      - "benchmarks/*/build/reports/pmd/*.html"
+    when: on_failure
+    expire_in: 1 day
+
+spotbugs:
+  stage: check
+  tags:
+    - exec-docker
+  script: ./gradlew spotbugs --continue
+  artifacts:
+    paths:
+      - "benchmarks/*/build/reports/spotbugs/*.html"
+    when: on_failure
+    expire_in: 1 day
+
+
+.deploy:
+  stage: deploy
+  tags:
+    - exec-dind
+  # see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled
+  # for image usage and settings for building with TLS and docker in docker
+  image: docker:19.03.1
+  services:
+    - docker:19.03.1-dind
+  variables:
+    DOCKER_TLS_CERTDIR: "/certs"
+  script:
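+    # Tag images with the branch slug as prefix; on master, the prefix is dropped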
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    - docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest"
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
+    - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$CI_COMMIT_TAG"
+    - echo $DOCKERHUB_PW | docker login -u $DOCKERHUB_ID --password-stdin
+    - docker push $DOCKERHUB_ORG/$IMAGE_NAME
+    - docker logout
+  rules:
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      # - $JAVA_PROJECT_NAME/**/* # hope this can be simplified soon, see #51
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc1-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc1-kstreams-app"
+    JAVA_PROJECT_NAME: "uc1-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc1-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc2-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc2-kstreams-app"
+    JAVA_PROJECT_NAME: "uc2-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc2-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc3-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc3-kstreams-app"
+    JAVA_PROJECT_NAME: "uc3-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc3-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc4-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc4-kstreams-app"
+    JAVA_PROJECT_NAME: "uc4-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc4-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc1-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc1-workload-generator"
+    JAVA_PROJECT_NAME: "uc1-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc1-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc2-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc2-workload-generator"
+    JAVA_PROJECT_NAME: "uc2-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc2-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc3-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc3-workload-generator"
+    JAVA_PROJECT_NAME: "uc3-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc3-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc4-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc4-workload-generator"
+    JAVA_PROJECT_NAME: "uc4-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc4-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+.deploy-ghcr:
+  stage: deploy
+  tags:
+    - exec-dind
+  # see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled
+  # for image usage and settings for building with TLS and docker in docker
+  image: docker:19.03.1
+  services:
+    - docker:19.03.1-dind
+  variables:
+    DOCKER_TLS_CERTDIR: "/certs"
+  script:
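+    # Same tagging scheme as in .deploy: branch slug prefix, dropped on master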
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    - docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest"
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
+    - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME:$CI_COMMIT_TAG"
+    - echo $GITHUB_CR_TOKEN | docker login ghcr.io -u $GITHUB_CR_USER --password-stdin
+    - docker push ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME
+    - docker logout
+  rules:
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      # - $JAVA_PROJECT_NAME/**/* # hope this can be simplified soon, see #51
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc1-kstreams-app:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc1-kstreams-app"
+    JAVA_PROJECT_NAME: "uc1-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc1-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc2-kstreams-app:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc2-kstreams-app"
+    JAVA_PROJECT_NAME: "uc2-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc2-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc3-kstreams-app:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc3-kstreams-app"
+    JAVA_PROJECT_NAME: "uc3-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc3-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc4-kstreams-app:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc4-kstreams-app"
+    JAVA_PROJECT_NAME: "uc4-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc4-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc1-workload-generator:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc1-workload-generator"
+    JAVA_PROJECT_NAME: "uc1-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc1-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc2-workload-generator:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc2-workload-generator"
+    JAVA_PROJECT_NAME: "uc2-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc2-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc3-workload-generator:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc3-workload-generator"
+    JAVA_PROJECT_NAME: "uc3-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc3-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc4-workload-generator:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc4-workload-generator"
+    JAVA_PROJECT_NAME: "uc4-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc4-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
diff --git a/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from .settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/.settings/org.eclipse.jdt.ui.prefs
diff --git a/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from .settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from .settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/application-kafkastreams-commons/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/application-kafkastreams-commons/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from application-kafkastreams-commons/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/application-kafkastreams-commons/.settings/org.eclipse.jdt.ui.prefs
diff --git a/application-kafkastreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from application-kafkastreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/application-kafkastreams-commons/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from application-kafkastreams-commons/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/application-kafkastreams-commons/build.gradle b/benchmarks/application-kafkastreams-commons/build.gradle
similarity index 100%
rename from application-kafkastreams-commons/build.gradle
rename to benchmarks/application-kafkastreams-commons/build.gradle
diff --git a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java b/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
similarity index 85%
rename from application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
rename to benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
index 6302e4c69904aaf57e3f936ee9ad0ead11414a8d..ca1838b84a4f1b3ddf11ad4dea8e34792371974b 100644
--- a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
+++ b/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
@@ -9,12 +9,6 @@ public final class ConfigurationKeys {
 
   public static final String APPLICATION_VERSION = "application.version";
 
-  public static final String NUM_THREADS = "num.threads";
-
-  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
-
-  public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
-
   public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
 
   public static final String SCHEMA_REGISTRY_URL = "schema.registry.url";
diff --git a/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java b/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..89bd3147f0d3bb7a5fecc5d8c7d277bd294494ad
--- /dev/null
+++ b/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
@@ -0,0 +1,124 @@
+package theodolite.commons.kafkastreams;
+
+import java.util.Properties;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import org.apache.commons.configuration2.Configuration;
+import org.apache.kafka.streams.KafkaStreams;
+import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.Topology;
+import titan.ccp.common.kafka.streams.PropertiesBuilder;
+
+/**
+ * Builder for the Kafka Streams configuration.
+ */
+public abstract class KafkaStreamsBuilder {
+
+  // Kafka Streams application specific
+  protected final String schemaRegistryUrl; // NOPMD for use in subclass
+  protected final String inputTopic; // NOPMD for use in subclass
+
+  private final Configuration config;
+
+  private final String applicationName; // NOPMD
+  private final String applicationVersion; // NOPMD
+  private final String bootstrapServers; // NOPMD
+
+  /**
+   * Construct a new Builder object for a Kafka Streams application.
+   *
+   * @param config The configuration providing the required key-value pairs.
+   */
+  public KafkaStreamsBuilder(final Configuration config) {
+    this.config = config;
+    this.applicationName = this.config.getString(ConfigurationKeys.APPLICATION_NAME);
+    this.applicationVersion = this.config.getString(ConfigurationKeys.APPLICATION_VERSION);
+    this.bootstrapServers = this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS);
+    this.schemaRegistryUrl = this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL);
+    this.inputTopic = this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC);
+  }
+
+  /**
+   * Checks whether the given key is contained in the configuration and, if so, sets the property.
+   *
+   * @param <T> Type of the value for the given key.
+   * @param propBuilder The builder on which to set the property.
+   * @param key The configuration key to look up.
+   * @param valueGetter Function that retrieves the value for the given key.
+   * @param condition Predicate the value must satisfy for the property to be set.
+   */
+  private <T> void setOptionalProperty(final PropertiesBuilder propBuilder,
+      final String key,
+      final Function<String, T> valueGetter,
+      final Predicate<T> condition) {
+    if (this.config.containsKey(key)) {
+      final T value = valueGetter.apply(key);
+      propBuilder.set(key, value, condition);
+    }
+  }
+
+  /**
+   * Build the {@link Properties} for a {@code KafkaStreams} application.
+   *
+   * @return A {@code Properties} object.
+   */
+  protected Properties buildProperties() {
+    // required configuration
+    final PropertiesBuilder propBuilder = PropertiesBuilder
+        .bootstrapServers(this.bootstrapServers)
+        .applicationId(this.applicationName + '-' + this.applicationVersion);
+
+    // optional configurations
+    this.setOptionalProperty(propBuilder, StreamsConfig.ACCEPTABLE_RECOVERY_LAG_CONFIG,
+        this.config::getLong,
+        p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.BUFFERED_RECORDS_PER_PARTITION_CONFIG,
+        this.config::getInt, p -> p > 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG,
+        this.config::getInt,
+        p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.COMMIT_INTERVAL_MS_CONFIG,
+        this.config::getInt, p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.MAX_TASK_IDLE_MS_CONFIG,
+        this.config::getLong,
+        p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.MAX_WARMUP_REPLICAS_CONFIG,
+        this.config::getInt, p -> p >= 1);
+    this.setOptionalProperty(propBuilder, StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG,
+        this.config::getInt, p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.NUM_STREAM_THREADS_CONFIG,
+        this.config::getInt, p -> p > 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.POLL_MS_CONFIG,
+        this.config::getLong,
+        p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.PROCESSING_GUARANTEE_CONFIG,
+        this.config::getString, p -> StreamsConfig.AT_LEAST_ONCE.equals(p)
+            || StreamsConfig.EXACTLY_ONCE.equals(p) || StreamsConfig.EXACTLY_ONCE_BETA.equals(p));
+    this.setOptionalProperty(propBuilder, StreamsConfig.REPLICATION_FACTOR_CONFIG,
+        this.config::getInt, p -> p >= 0);
+
+    if (this.config.containsKey(StreamsConfig.TOPOLOGY_OPTIMIZATION)
+        && this.config.getBoolean(StreamsConfig.TOPOLOGY_OPTIMIZATION)) {
+      propBuilder.set(StreamsConfig.TOPOLOGY_OPTIMIZATION, StreamsConfig.OPTIMIZE);
+    }
+
+    return propBuilder.build();
+  }
+
+  /**
+   * Implement this method to build the {@link Topology} for a {@code KafkaStreams} application.
+   *
+   * @return The {@code Topology} built from the given {@link Properties}.
+   */
+  protected abstract Topology buildTopology(Properties properties);
+
+  /**
+   * Builds the {@link KafkaStreams} instance.
+   */
+  public KafkaStreams build() {
+    // Create the Kafka Streams instance.
+    final Properties properties = this.buildProperties();
+    return new KafkaStreams(this.buildTopology(properties), properties);
+  }
+
+}
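
The new commons builder above centralizes all shared Kafka Streams wiring: required settings (bootstrap servers, application id) come from the `Configuration`, and every standard Streams tuning knob is picked up only if it is present and passes its validity check. A minimal sketch of how a use-case builder plugs into it (class and topic handling here are illustrative, not part of this change):

```java
import java.util.Properties;
import org.apache.commons.configuration2.Configuration;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import theodolite.commons.kafkastreams.KafkaStreamsBuilder;

// Hypothetical subclass, for illustration only; the uc1-uc4 builders
// in the hunks below follow exactly this pattern after the refactoring.
public class ExampleKafkaStreamsBuilder extends KafkaStreamsBuilder {

  public ExampleKafkaStreamsBuilder(final Configuration config) {
    super(config); // reads application name/version, bootstrap servers, input topic, ...
  }

  @Override
  protected Topology buildTopology(final Properties properties) {
    final StreamsBuilder builder = new StreamsBuilder();
    // Echo each record; a real topology would do the use case's processing here.
    builder.stream(this.inputTopic)
        .foreach((k, v) -> System.out.println(k + ": " + v));
    // Pass the properties through so Kafka Streams can apply optimizations.
    return builder.build(properties);
  }
}
```

A service then only needs `new ExampleKafkaStreamsBuilder(config).build().start()`; the per-key `.numThreads(...)`-style setters removed in the hunks below become unnecessary.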
diff --git a/build.gradle b/benchmarks/build.gradle
similarity index 91%
rename from build.gradle
rename to benchmarks/build.gradle
index 1e388cb9665b43e004a1854248acc04e1cda387c..3cb86b68e9d37c53572c6611fad1057b5505e9cc 100644
--- a/build.gradle
+++ b/benchmarks/build.gradle
@@ -6,7 +6,7 @@ buildscript {
     }
   }
   dependencies {
-    classpath "gradle.plugin.com.github.spotbugs:spotbugs-gradle-plugin:1.6.3"
+    classpath "gradle.plugin.com.github.spotbugs.snom:spotbugs-gradle-plugin:4.6.0"
   }
 }
 
@@ -65,6 +65,7 @@ configure(useCaseApplications) {
       implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
       implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
       implementation 'org.apache.kafka:kafka-streams:2.6.0' // enable TransformerSuppliers
+      implementation 'com.google.code.gson:gson:2.8.2'
       implementation 'com.google.guava:guava:24.1-jre'
       implementation 'org.jctools:jctools-core:2.1.1'
       implementation 'org.slf4j:slf4j-simple:1.7.25'
@@ -100,6 +101,7 @@ configure(commonProjects) {
       implementation 'org.slf4j:slf4j-simple:1.7.25'
       implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
       implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
+      implementation 'org.apache.kafka:kafka-streams:2.6.0'
 
       // Use JUnit test framework
       testImplementation 'junit:junit:4.12'
@@ -108,7 +110,7 @@ configure(commonProjects) {
 
 // Per default XML reports for SpotBugs are generated
 // Include this to generate HTML reports
-tasks.withType(com.github.spotbugs.SpotBugsTask) {
+tasks.withType(com.github.spotbugs.snom.SpotBugsTask) {
   reports {
     // Either HTML or XML reports can be activated
     html.enabled true
@@ -165,7 +167,7 @@ subprojects {
     reportLevel = "low"
     effort = "max"
     ignoreFailures = false
-    toolVersion = '3.1.7'
+    toolVersion = '4.1.4'
   }
 }
 
diff --git a/config/README.md b/benchmarks/config/README.md
similarity index 100%
rename from config/README.md
rename to benchmarks/config/README.md
diff --git a/config/checkstyle-suppression.xml b/benchmarks/config/checkstyle-suppression.xml
similarity index 100%
rename from config/checkstyle-suppression.xml
rename to benchmarks/config/checkstyle-suppression.xml
diff --git a/config/checkstyle.xml b/benchmarks/config/checkstyle.xml
similarity index 100%
rename from config/checkstyle.xml
rename to benchmarks/config/checkstyle.xml
diff --git a/config/eclipse-cleanup.xml b/benchmarks/config/eclipse-cleanup.xml
similarity index 100%
rename from config/eclipse-cleanup.xml
rename to benchmarks/config/eclipse-cleanup.xml
diff --git a/config/eclipse-formatter.xml b/benchmarks/config/eclipse-formatter.xml
similarity index 100%
rename from config/eclipse-formatter.xml
rename to benchmarks/config/eclipse-formatter.xml
diff --git a/config/eclipse-import-order.importorder b/benchmarks/config/eclipse-import-order.importorder
similarity index 100%
rename from config/eclipse-import-order.importorder
rename to benchmarks/config/eclipse-import-order.importorder
diff --git a/config/pmd.xml b/benchmarks/config/pmd.xml
similarity index 100%
rename from config/pmd.xml
rename to benchmarks/config/pmd.xml
diff --git a/config/spotbugs-exclude-filter.xml b/benchmarks/config/spotbugs-exclude-filter.xml
similarity index 100%
rename from config/spotbugs-exclude-filter.xml
rename to benchmarks/config/spotbugs-exclude-filter.xml
diff --git a/benchmarks/gradle/wrapper/gradle-wrapper.jar b/benchmarks/gradle/wrapper/gradle-wrapper.jar
new file mode 100644
index 0000000000000000000000000000000000000000..457aad0d98108420a977756b7145c93c8910b076
Binary files /dev/null and b/benchmarks/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/gradle/wrapper/gradle-wrapper.properties b/benchmarks/gradle/wrapper/gradle-wrapper.properties
similarity index 91%
rename from gradle/wrapper/gradle-wrapper.properties
rename to benchmarks/gradle/wrapper/gradle-wrapper.properties
index e0b3fb8d70b1bbf790f6f8ed1c928ddf09f54628..4d9ca1649142b0c20144adce78e2472e2da01c30 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/benchmarks/gradle/wrapper/gradle-wrapper.properties
@@ -1,5 +1,5 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.2-bin.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-6.7.1-bin.zip
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
diff --git a/gradlew b/benchmarks/gradlew
similarity index 99%
rename from gradlew
rename to benchmarks/gradlew
index cccdd3d517fc5249beaefa600691cf150f2fa3e6..af6708ff229fda75da4f7cc4da4747217bac4d53 100755
--- a/gradlew
+++ b/benchmarks/gradlew
@@ -28,7 +28,7 @@ APP_NAME="Gradle"
 APP_BASE_NAME=`basename "$0"`
 
 # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
-DEFAULT_JVM_OPTS=""
+DEFAULT_JVM_OPTS='"-Xmx64m"'
 
 # Use the maximum available, or set MAX_FD != -1 to use that value.
 MAX_FD="maximum"
diff --git a/gradlew.bat b/benchmarks/gradlew.bat
similarity index 94%
rename from gradlew.bat
rename to benchmarks/gradlew.bat
index e95643d6a2ca62258464e83c72f5156dc941c609..0f8d5937c4ad18feb44a19e55ad1e37cc159260f 100644
--- a/gradlew.bat
+++ b/benchmarks/gradlew.bat
@@ -14,7 +14,7 @@ set APP_BASE_NAME=%~n0
 set APP_HOME=%DIRNAME%
 
 @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
-set DEFAULT_JVM_OPTS=
+set DEFAULT_JVM_OPTS="-Xmx64m"
 
 @rem Find java.exe
 if defined JAVA_HOME goto findJavaFromJavaHome
diff --git a/settings.gradle b/benchmarks/settings.gradle
similarity index 100%
rename from settings.gradle
rename to benchmarks/settings.gradle
diff --git a/uc1-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc1-application/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc1-application/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc1-application/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc1-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc1-application/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc1-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc1-application/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc1-application/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc1-application/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc1-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc1-application/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc1-application/Dockerfile b/benchmarks/uc1-application/Dockerfile
similarity index 100%
rename from uc1-application/Dockerfile
rename to benchmarks/uc1-application/Dockerfile
diff --git a/uc1-application/build.gradle b/benchmarks/uc1-application/build.gradle
similarity index 100%
rename from uc1-application/build.gradle
rename to benchmarks/uc1-application/build.gradle
diff --git a/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java b/benchmarks/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
similarity index 56%
rename from uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
rename to benchmarks/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
index a35cc37b36fb906e5c5495006126374d4de4656c..f0d8062a2442181507c0bef990b73e0e9cf4a372 100644
--- a/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
+++ b/benchmarks/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
@@ -3,7 +3,6 @@ package theodolite.uc1.application;
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
-import theodolite.commons.kafkastreams.ConfigurationKeys;
 import theodolite.uc1.streamprocessing.Uc1KafkaStreamsBuilder;
 import titan.ccp.common.configuration.ServiceConfigurations;
 
@@ -31,18 +30,9 @@ public class HistoryService {
    */
   private void createKafkaStreamsApplication() {
 
-    final Uc1KafkaStreamsBuilder uc1KafkaStreamsBuilder = new Uc1KafkaStreamsBuilder();
-    uc1KafkaStreamsBuilder.inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC));
-
-    final KafkaStreams kafkaStreams = uc1KafkaStreamsBuilder
-        .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
-        .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
-        .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
-        .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
-        .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
-        .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
-        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
-        .build();
+    final Uc1KafkaStreamsBuilder uc1KafkaStreamsBuilder = new Uc1KafkaStreamsBuilder(this.config);
+
+    final KafkaStreams kafkaStreams = uc1KafkaStreamsBuilder.build();
 
     this.stopEvent.thenRun(kafkaStreams::close);
 
diff --git a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java b/benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
similarity index 92%
rename from uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
rename to benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
index 1c30e0c2c83b3d8a2f3dca4df0c7aec99cc4f450..75c833aa722654395b1adc6f739395eea5256820 100644
--- a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
+++ b/benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
@@ -1,6 +1,7 @@
 package theodolite.uc1.streamprocessing;
 
 import com.google.gson.Gson;
+import java.util.Properties;
 import org.apache.kafka.common.serialization.Serdes;
 import org.apache.kafka.streams.StreamsBuilder;
 import org.apache.kafka.streams.Topology;
@@ -36,7 +37,7 @@ public class TopologyBuilder {
   /**
    * Build the {@link Topology} for the History microservice.
    */
-  public Topology build() {
+  public Topology build(final Properties properties) {
     this.builder
         .stream(this.inputTopic, Consumed.with(
             Serdes.String(),
@@ -44,6 +45,6 @@ public class TopologyBuilder {
         .mapValues(v -> this.gson.toJson(v))
         .foreach((k, v) -> LOGGER.info("Key: " + k + " Value: " + v));
 
-    return this.builder.build();
+    return this.builder.build(properties);
   }
 }
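
The signature change from `build()` to `build(final Properties properties)` matters because only `StreamsBuilder.build(Properties)` lets Kafka Streams rewrite the topology when `topology.optimization` is enabled; the no-argument `build()` ignores that setting. A standalone sketch of the effect (topic name illustrative):

```java
import java.util.Properties;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;

public final class OptimizationExample {
  public static void main(final String[] args) {
    final Properties properties = new Properties();
    properties.put(StreamsConfig.TOPOLOGY_OPTIMIZATION, StreamsConfig.OPTIMIZE);

    final StreamsBuilder builder = new StreamsBuilder();
    builder.table("example-input"); // a table is a case the optimizer can rewrite

    // With the properties passed in, optimizations such as reusing the source
    // topic as the table's changelog can be applied; builder.build() alone
    // would build the unoptimized topology.
    final Topology topology = builder.build(properties);
    System.out.println(topology.describe());
  }
}
```

The same change is applied to the uc2-uc4 topology builders further down.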
diff --git a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java b/benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
similarity index 69%
rename from uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
rename to benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
index 7699ecb48369a2041777b901931c46072a10d99f..cc39bb04623c06a4d41cb2c695804ed41818a67c 100644
--- a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
+++ b/benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
@@ -1,6 +1,8 @@
 package theodolite.uc1.streamprocessing;
 
 import java.util.Objects;
+import java.util.Properties;
+import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
 import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
@@ -9,17 +11,15 @@ import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
  * Builder for the Kafka Streams configuration.
  */
 public class Uc1KafkaStreamsBuilder extends KafkaStreamsBuilder {
-  private String inputTopic; // NOPMD
 
-  public KafkaStreamsBuilder inputTopic(final String inputTopic) {
-    this.inputTopic = inputTopic;
-    return this;
+  public Uc1KafkaStreamsBuilder(final Configuration config) {
+    super(config);
   }
 
   @Override
-  protected Topology buildTopology() {
+  protected Topology buildTopology(final Properties properties) {
     Objects.requireNonNull(this.inputTopic, "Input topic has not been set.");
     return new TopologyBuilder(this.inputTopic,
-        new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl)).build();
+        new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl)).build(properties);
   }
 }
diff --git a/uc1-application/src/main/resources/META-INF/application.properties b/benchmarks/uc1-application/src/main/resources/META-INF/application.properties
similarity index 65%
rename from uc1-application/src/main/resources/META-INF/application.properties
rename to benchmarks/uc1-application/src/main/resources/META-INF/application.properties
index 3fb301516daa4c7e14875d3d9ca9df9c770eb69e..b46e6246e248cc524c5b6249348c76ded6ec468b 100644
--- a/uc1-application/src/main/resources/META-INF/application.properties
+++ b/benchmarks/uc1-application/src/main/resources/META-INF/application.properties
@@ -3,10 +3,6 @@ application.version=0.0.1
 
 kafka.bootstrap.servers=localhost:9092
 kafka.input.topic=input
-kafka.output.topic=output
 
 schema.registry.url=http://localhost:8091
 
-num.threads=1
-commit.interval.ms=100
-cache.max.bytes.buffering=-1
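
These three keys are dropped from the defaults because the commons builder now reads them as optional settings under their native Kafka Streams names and validates them (for instance, `cache.max.bytes.buffering` must be non-negative, so the old `-1` default would be rejected). A deployment that still wants to pin them could add, for example (values illustrative):

```properties
num.stream.threads=1
commit.interval.ms=100
cache.max.bytes.buffering=10485760
```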
diff --git a/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc1-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc1-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc1-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc1-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc1-workload-generator/Dockerfile b/benchmarks/uc1-workload-generator/Dockerfile
similarity index 100%
rename from uc1-workload-generator/Dockerfile
rename to benchmarks/uc1-workload-generator/Dockerfile
diff --git a/uc1-workload-generator/build.gradle b/benchmarks/uc1-workload-generator/build.gradle
similarity index 100%
rename from uc1-workload-generator/build.gradle
rename to benchmarks/uc1-workload-generator/build.gradle
diff --git a/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java b/benchmarks/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
rename to benchmarks/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
diff --git a/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc2-application/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc2-application/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc2-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc2-application/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc2-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc2-application/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc2-application/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc2-application/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc2-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc2-application/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc2-application/Dockerfile b/benchmarks/uc2-application/Dockerfile
similarity index 100%
rename from uc2-application/Dockerfile
rename to benchmarks/uc2-application/Dockerfile
diff --git a/uc2-application/README.md b/benchmarks/uc2-application/README.md
similarity index 100%
rename from uc2-application/README.md
rename to benchmarks/uc2-application/README.md
diff --git a/uc2-application/build.gradle b/benchmarks/uc2-application/build.gradle
similarity index 100%
rename from uc2-application/build.gradle
rename to benchmarks/uc2-application/build.gradle
diff --git a/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
similarity index 67%
rename from uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
index c094adfcd7952e81115dae84ed9c0d371e380c98..2f828278f5a3033c3e479bf82f3c8c5d9d4c380c 100644
--- a/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
+++ b/benchmarks/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
@@ -36,26 +36,15 @@ public class AggregationService {
    * @param clusterSession the database session which the application should use.
    */
   private void createKafkaStreamsApplication() {
-    // Use case specific stream configuration
-    final Uc2KafkaStreamsBuilder uc2KafkaStreamsBuilder = new Uc2KafkaStreamsBuilder();
+    final Uc2KafkaStreamsBuilder uc2KafkaStreamsBuilder = new Uc2KafkaStreamsBuilder(this.config);
     uc2KafkaStreamsBuilder
-        .inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC))
         .feedbackTopic(this.config.getString(ConfigurationKeys.KAFKA_FEEDBACK_TOPIC))
         .outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC))
         .configurationTopic(this.config.getString(ConfigurationKeys.KAFKA_CONFIGURATION_TOPIC))
         .emitPeriod(Duration.ofMillis(this.config.getLong(ConfigurationKeys.EMIT_PERIOD_MS)))
         .gracePeriod(Duration.ofMillis(this.config.getLong(ConfigurationKeys.GRACE_PERIOD_MS)));
 
-    // Configuration of the stream application
-    final KafkaStreams kafkaStreams = uc2KafkaStreamsBuilder
-        .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
-        .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
-        .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
-        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
-        .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
-        .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
-        .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
-        .build();
+    final KafkaStreams kafkaStreams = uc2KafkaStreamsBuilder.build();
 
     this.stopEvent.thenRun(kafkaStreams::close);
     kafkaStreams.start();
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
similarity index 98%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
index c09fa3ead7553bda5cd8e8f09079f846b89d5d17..74e9bb99b80efec4c27d7eb50668d622a5d951f9 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
+++ b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
@@ -1,6 +1,7 @@
 package theodolite.uc2.streamprocessing;
 
 import java.time.Duration;
+import java.util.Properties;
 import java.util.Set;
 import org.apache.kafka.common.serialization.Serdes;
 import org.apache.kafka.streams.KeyValue;
@@ -17,8 +18,6 @@ import org.apache.kafka.streams.kstream.Suppressed.BufferConfig;
 import org.apache.kafka.streams.kstream.TimeWindows;
 import org.apache.kafka.streams.kstream.Windowed;
 import org.apache.kafka.streams.kstream.WindowedSerdes;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
 import titan.ccp.configuration.events.Event;
 import titan.ccp.configuration.events.EventSerde;
@@ -38,7 +37,7 @@ public class TopologyBuilder {
   private final Duration emitPeriod;
   private final Duration gracePeriod;
 
-  // SERDEs
+  // Serdes
   private final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory;
 
   private final StreamsBuilder builder = new StreamsBuilder();
@@ -61,9 +60,9 @@ public class TopologyBuilder {
       final Duration emitPeriod, final Duration gracePeriod,
       final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory) {
     this.inputTopic = inputTopic;
+    this.outputTopic = outputTopic;
     this.feedbackTopic = feedbackTopic;
     this.configurationTopic = configurationTopic;
-    this.outputTopic = outputTopic;
     this.emitPeriod = emitPeriod;
     this.gracePeriod = gracePeriod;
 
@@ -73,7 +72,7 @@ public class TopologyBuilder {
   /**
    * Build the {@link Topology} for the Aggregation microservice.
    */
-  public Topology build() {
+  public Topology build(final Properties properties) {
     // 1. Build Parent-Sensor Table
     final KTable<String, Set<String>> parentSensorTable = this.buildParentSensorTable();
 
@@ -94,7 +93,7 @@ public class TopologyBuilder {
     // 5. Expose Aggregations Stream
     this.exposeOutputStream(aggregations);
 
-    return this.builder.build();
+    return this.builder.build(properties);
   }
 
   private KTable<String, ActivePowerRecord> buildInputTable() {
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
similarity index 89%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
index 16addb8510eec2254d4787edbfbfbe186996fdea..7e077b101c0e1bfab359fc347ffe8c4acc9b88fc 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
+++ b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
@@ -2,6 +2,8 @@ package theodolite.uc2.streamprocessing;
 
 import java.time.Duration;
 import java.util.Objects;
+import java.util.Properties;
+import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
 import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
@@ -14,16 +16,14 @@ public class Uc2KafkaStreamsBuilder extends KafkaStreamsBuilder { // NOPMD build
   private static final Duration EMIT_PERIOD_DEFAULT = Duration.ofSeconds(1);
   private static final Duration GRACE_PERIOD_DEFAULT = Duration.ZERO;
 
-  private String inputTopic; // NOPMD
   private String feedbackTopic; // NOPMD
   private String outputTopic; // NOPMD
   private String configurationTopic; // NOPMD
   private Duration emitPeriod; // NOPMD
   private Duration gracePeriod; // NOPMD
 
-  public Uc2KafkaStreamsBuilder inputTopic(final String inputTopic) {
-    this.inputTopic = inputTopic;
-    return this;
+  public Uc2KafkaStreamsBuilder(final Configuration config) {
+    super(config);
   }
 
   public Uc2KafkaStreamsBuilder feedbackTopic(final String feedbackTopic) {
@@ -52,7 +52,7 @@ public class Uc2KafkaStreamsBuilder extends KafkaStreamsBuilder { // NOPMD build
   }
 
   @Override
-  protected Topology buildTopology() {
+  protected Topology buildTopology(final Properties properties) {
     Objects.requireNonNull(this.inputTopic, "Input topic has not been set.");
     Objects.requireNonNull(this.feedbackTopic, "Feedback topic has not been set.");
     Objects.requireNonNull(this.outputTopic, "Output topic has not been set.");
@@ -60,14 +60,14 @@ public class Uc2KafkaStreamsBuilder extends KafkaStreamsBuilder { // NOPMD build
 
     final TopologyBuilder topologyBuilder = new TopologyBuilder(
         this.inputTopic,
-        this.feedbackTopic,
         this.outputTopic,
+        this.feedbackTopic,
         this.configurationTopic,
         this.emitPeriod == null ? EMIT_PERIOD_DEFAULT : this.emitPeriod,
         this.gracePeriod == null ? GRACE_PERIOD_DEFAULT : this.gracePeriod,
         new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl));
 
-    return topologyBuilder.build();
+    return topologyBuilder.build(properties);
   }
 
 }
diff --git a/uc2-application/src/main/resources/META-INF/application.properties b/benchmarks/uc2-application/src/main/resources/META-INF/application.properties
similarity index 78%
rename from uc2-application/src/main/resources/META-INF/application.properties
rename to benchmarks/uc2-application/src/main/resources/META-INF/application.properties
index 10c47960adb012ba5c572e3833a37d821189eb8e..8f1af5f590eff7f2b12706d61a7c89d9152f7949 100644
--- a/uc2-application/src/main/resources/META-INF/application.properties
+++ b/benchmarks/uc2-application/src/main/resources/META-INF/application.properties
@@ -10,8 +10,4 @@ kafka.output.topic=output
 schema.registry.url=http://localhost:8091
 
 emit.period.ms=5000
-grace.period.ms=0
-
-num.threads=1
-commit.interval.ms=100
-cache.max.bytes.buffering=-1
+grace.period.ms=0
\ No newline at end of file
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java
diff --git a/uc2-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc2-application/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc2-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc2-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc2-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc2-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc2-workload-generator/Dockerfile b/benchmarks/uc2-workload-generator/Dockerfile
similarity index 100%
rename from uc2-workload-generator/Dockerfile
rename to benchmarks/uc2-workload-generator/Dockerfile
diff --git a/uc2-workload-generator/build.gradle b/benchmarks/uc2-workload-generator/build.gradle
similarity index 100%
rename from uc2-workload-generator/build.gradle
rename to benchmarks/uc2-workload-generator/build.gradle
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java b/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
similarity index 100%
rename from uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
rename to benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java b/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
rename to benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java b/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java
similarity index 100%
rename from uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java
rename to benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java
diff --git a/uc2-workload-generator/src/main/resources/META-INF/application.properties b/benchmarks/uc2-workload-generator/src/main/resources/META-INF/application.properties
similarity index 100%
rename from uc2-workload-generator/src/main/resources/META-INF/application.properties
rename to benchmarks/uc2-workload-generator/src/main/resources/META-INF/application.properties
diff --git a/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java b/benchmarks/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java
similarity index 100%
rename from uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java
rename to benchmarks/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java
diff --git a/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc3-application/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc3-application/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc3-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc3-application/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc3-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc3-application/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc3-application/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc3-application/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc3-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc3-application/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc3-application/Dockerfile b/benchmarks/uc3-application/Dockerfile
similarity index 100%
rename from uc3-application/Dockerfile
rename to benchmarks/uc3-application/Dockerfile
diff --git a/benchmarks/uc3-application/build.gradle b/benchmarks/uc3-application/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..aa96b6dbf90c4895dfda57a51c753c9103c29414
--- /dev/null
+++ b/benchmarks/uc3-application/build.gradle
@@ -0,0 +1 @@
+mainClassName = "theodolite.uc3.application.HistoryService"
diff --git a/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
similarity index 63%
rename from uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
index b245b1645c9e5ee68df3f108802c9b91d70cf017..349512f988bb182d8851e458a1bce244c756bbfe 100644
--- a/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
+++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
@@ -34,23 +34,13 @@ public class HistoryService {
    *
    */
   private void createKafkaStreamsApplication() {
-    // Use case specific stream configuration
-    final Uc3KafkaStreamsBuilder uc3KafkaStreamsBuilder = new Uc3KafkaStreamsBuilder();
+    final Uc3KafkaStreamsBuilder uc3KafkaStreamsBuilder = new Uc3KafkaStreamsBuilder(this.config);
     uc3KafkaStreamsBuilder
-        .inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC))
         .outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC))
         .windowDuration(Duration.ofMinutes(this.windowDurationMinutes));
 
-    // Configuration of the stream application
-    final KafkaStreams kafkaStreams = uc3KafkaStreamsBuilder
-        .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
-        .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
-        .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
-        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
-        .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
-        .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
-        .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
-        .build();
+    final KafkaStreams kafkaStreams = uc3KafkaStreamsBuilder.build();
+
     this.stopEvent.thenRun(kafkaStreams::close);
     kafkaStreams.start();
   }
diff --git a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
similarity index 95%
rename from uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
index 74eed74c52a78df229c02542bc6e66d7f796c2c7..d6d6d4ffb7ebb1236be73dd681c900311853e732 100644
--- a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
+++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
@@ -2,6 +2,7 @@ package theodolite.uc3.streamprocessing;
 
 import com.google.common.math.Stats;
 import java.time.Duration;
+import java.util.Properties;
 import org.apache.kafka.common.serialization.Serdes;
 import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.StreamsBuilder;
@@ -46,7 +47,7 @@ public class TopologyBuilder {
   /**
    * Build the {@link Topology} for the History microservice.
    */
-  public Topology build() {
+  public Topology build(final Properties properties) {
     this.builder
         .stream(this.inputTopic,
             Consumed.with(Serdes.String(),
@@ -68,6 +69,6 @@ public class TopologyBuilder {
         .peek((k, v) -> LOGGER.info(k + ": " + v))
         .to(this.outputTopic, Produced.with(Serdes.String(), Serdes.String()));
 
-    return this.builder.build();
+    return this.builder.build(properties);
   }
 }
diff --git a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
similarity index 81%
rename from uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
index e74adf7c87673cc0e6ea4004dbcb1c0a6fc907ac..70113271a9d3c23499b85c07bf9d0a76db59f820 100644
--- a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
+++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
@@ -2,6 +2,8 @@ package theodolite.uc3.streamprocessing;
 
 import java.time.Duration;
 import java.util.Objects;
+import java.util.Properties;
+import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
 import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
@@ -11,13 +13,11 @@ import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
  */
 public class Uc3KafkaStreamsBuilder extends KafkaStreamsBuilder {
 
-  private String inputTopic; // NOPMD
   private String outputTopic; // NOPMD
   private Duration windowDuration; // NOPMD
 
-  public Uc3KafkaStreamsBuilder inputTopic(final String inputTopic) {
-    this.inputTopic = inputTopic;
-    return this;
+  public Uc3KafkaStreamsBuilder(final Configuration config) {
+    super(config);
   }
 
   public Uc3KafkaStreamsBuilder outputTopic(final String outputTopic) {
@@ -31,14 +31,14 @@ public class Uc3KafkaStreamsBuilder extends KafkaStreamsBuilder {
   }
 
   @Override
-  protected Topology buildTopology() {
+  protected Topology buildTopology(final Properties properties) {
     Objects.requireNonNull(this.inputTopic, "Input topic has not been set.");
     Objects.requireNonNull(this.outputTopic, "Output topic has not been set.");
     Objects.requireNonNull(this.windowDuration, "Window duration has not been set.");
 
     final TopologyBuilder topologyBuilder = new TopologyBuilder(this.inputTopic, this.outputTopic,
         new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl), this.windowDuration);
-    return topologyBuilder.build();
+    return topologyBuilder.build(properties);
   }
 
 }
diff --git a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java
similarity index 100%
rename from uc3-application/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java
rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java
diff --git a/uc3-application/src/main/resources/META-INF/application.properties b/benchmarks/uc3-application/src/main/resources/META-INF/application.properties
similarity index 77%
rename from uc3-application/src/main/resources/META-INF/application.properties
rename to benchmarks/uc3-application/src/main/resources/META-INF/application.properties
index 2ceaf37224b0bff54b09beaabe29210216e11671..011406f7ef1e23647eeae150d349f472214cbcd4 100644
--- a/uc3-application/src/main/resources/META-INF/application.properties
+++ b/benchmarks/uc3-application/src/main/resources/META-INF/application.properties
@@ -7,7 +7,3 @@ kafka.output.topic=output
 kafka.window.duration.minutes=1
 
 schema.registry.url=http://localhost:8091
-
-num.threads=1
-commit.interval.ms=100
-cache.max.bytes.buffering=-1
diff --git a/uc3-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc3-application/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc3-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc3-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc3-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc3-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc3-workload-generator/Dockerfile b/benchmarks/uc3-workload-generator/Dockerfile
similarity index 100%
rename from uc3-workload-generator/Dockerfile
rename to benchmarks/uc3-workload-generator/Dockerfile
diff --git a/uc3-workload-generator/build.gradle b/benchmarks/uc3-workload-generator/build.gradle
similarity index 100%
rename from uc3-workload-generator/build.gradle
rename to benchmarks/uc3-workload-generator/build.gradle
diff --git a/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java b/benchmarks/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
rename to benchmarks/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
diff --git a/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc4-application/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc4-application/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc4-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc4-application/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc4-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc4-application/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc4-application/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc4-application/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc4-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc4-application/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc4-application/Dockerfile b/benchmarks/uc4-application/Dockerfile
similarity index 100%
rename from uc4-application/Dockerfile
rename to benchmarks/uc4-application/Dockerfile
diff --git a/benchmarks/uc4-application/build.gradle b/benchmarks/uc4-application/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..56663022144166711d6bebce0f6480e358a738b5
--- /dev/null
+++ b/benchmarks/uc4-application/build.gradle
@@ -0,0 +1 @@
+mainClassName = "theodolite.uc4.application.HistoryService"
diff --git a/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
similarity index 67%
rename from uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
index 23af805733de2bb3f6384fa924a2322490ee58d9..12f35e8dcc532b19e470722094ba5aff07420ad2 100644
--- a/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
+++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
@@ -32,9 +32,8 @@ public class HistoryService {
    */
   private void createKafkaStreamsApplication() {
     // Use case specific stream configuration
-    final Uc4KafkaStreamsBuilder uc4KafkaStreamsBuilder = new Uc4KafkaStreamsBuilder();
+    final Uc4KafkaStreamsBuilder uc4KafkaStreamsBuilder = new Uc4KafkaStreamsBuilder(this.config);
     uc4KafkaStreamsBuilder
-        .inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC))
         .outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC))
         .aggregtionDuration(
             Duration.ofDays(this.config.getInt(ConfigurationKeys.AGGREGATION_DURATION_DAYS)))
@@ -42,15 +41,7 @@ public class HistoryService {
             Duration.ofDays(this.config.getInt(ConfigurationKeys.AGGREGATION_ADVANCE_DAYS)));
 
     // Configuration of the stream application
-    final KafkaStreams kafkaStreams = uc4KafkaStreamsBuilder
-        .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
-        .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
-        .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
-        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
-        .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
-        .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
-        .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
-        .build();
+    final KafkaStreams kafkaStreams = uc4KafkaStreamsBuilder.build();
 
     this.stopEvent.thenRun(kafkaStreams::close);
     kafkaStreams.start();
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
similarity index 96%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
index a92abae6e11c4bf66a5d8d8dee0f10b088e8274b..a0c87ba4702b9c3f191291a3f04679cc73fcb04b 100644
--- a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
+++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
@@ -5,6 +5,7 @@ import java.time.Duration;
 import java.time.Instant;
 import java.time.LocalDateTime;
 import java.time.ZoneId;
+import java.util.Properties;
 import org.apache.kafka.common.serialization.Serde;
 import org.apache.kafka.common.serialization.Serdes;
 import org.apache.kafka.streams.KeyValue;
@@ -54,7 +55,7 @@ public class TopologyBuilder {
   /**
    * Build the {@link Topology} for the History microservice.
    */
-  public Topology build() {
+  public Topology build(final Properties properties) {
     final StatsKeyFactory<HourOfDayKey> keyFactory = new HourOfDayKeyFactory();
     final Serde<HourOfDayKey> keySerde = HourOfDayKeySerde.create();
 
@@ -89,6 +90,6 @@ public class TopologyBuilder {
                 Serdes.String()));
     // this.serdes.avroValues()));
 
-    return this.builder.build();
+    return this.builder.build(properties);
   }
 }
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
similarity index 85%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
index 7c9e2c4f790cf1fbb7dd34db573576d1e64077db..67c652967194f59db560b8ad6fd86410725b3c9c 100644
--- a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
+++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
@@ -2,6 +2,8 @@ package theodolite.uc4.streamprocessing;
 
 import java.time.Duration;
 import java.util.Objects;
+import java.util.Properties;
+import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
 import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
@@ -11,14 +13,12 @@ import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
  */
 public class Uc4KafkaStreamsBuilder extends KafkaStreamsBuilder {
 
-  private String inputTopic; // NOPMD
   private String outputTopic; // NOPMD
   private Duration aggregtionDuration; // NOPMD
   private Duration aggregationAdvance; // NOPMD
 
-  public Uc4KafkaStreamsBuilder inputTopic(final String inputTopic) {
-    this.inputTopic = inputTopic;
-    return this;
+  public Uc4KafkaStreamsBuilder(final Configuration config) {
+    super(config);
   }
 
   public Uc4KafkaStreamsBuilder outputTopic(final String outputTopic) {
@@ -37,7 +37,7 @@ public class Uc4KafkaStreamsBuilder extends KafkaStreamsBuilder {
   }
 
   @Override
-  protected Topology buildTopology() {
+  protected Topology buildTopology(final Properties properties) {
     Objects.requireNonNull(this.inputTopic, "Input topic has not been set.");
     Objects.requireNonNull(this.outputTopic, "Output topic has not been set.");
     Objects.requireNonNull(this.aggregtionDuration, "Aggregation duration has not been set.");
@@ -50,7 +50,7 @@ public class Uc4KafkaStreamsBuilder extends KafkaStreamsBuilder {
         this.aggregtionDuration,
         this.aggregationAdvance);
 
-    return topologyBuilder.build();
+    return topologyBuilder.build(properties);
   }
 
 }
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java
diff --git a/uc4-application/src/main/resources/META-INF/application.properties b/benchmarks/uc4-application/src/main/resources/META-INF/application.properties
similarity index 76%
rename from uc4-application/src/main/resources/META-INF/application.properties
rename to benchmarks/uc4-application/src/main/resources/META-INF/application.properties
index 4d4bc7b5a31d811e856f04561c51fc7ac5a970a8..b46681533e63bf86a51439778a46940da348559d 100644
--- a/uc4-application/src/main/resources/META-INF/application.properties
+++ b/benchmarks/uc4-application/src/main/resources/META-INF/application.properties
@@ -6,6 +6,5 @@ kafka.input.topic=input
 kafka.output.topic=output
 aggregation.duration.days=30
 aggregation.advance.days=1
-num.threads=1
-commit.interval.ms=100
-cache.max.bytes.buffering=-1
+
+schema.registry.url=http://localhost:8091
diff --git a/uc4-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc4-application/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc4-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc4-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc4-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc4-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc4-workload-generator/Dockerfile b/benchmarks/uc4-workload-generator/Dockerfile
similarity index 100%
rename from uc4-workload-generator/Dockerfile
rename to benchmarks/uc4-workload-generator/Dockerfile
diff --git a/uc4-workload-generator/build.gradle b/benchmarks/uc4-workload-generator/build.gradle
similarity index 100%
rename from uc4-workload-generator/build.gradle
rename to benchmarks/uc4-workload-generator/build.gradle
diff --git a/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java b/benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
rename to benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
diff --git a/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
similarity index 99%
rename from uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
index 4e04e2891754324a6e1bf55348b6a38f592bb301..fa98ca63d77bdee891150bd6713f70197a75cefc 100644
--- a/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs
+++ b/benchmarks/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
@@ -32,7 +32,7 @@ cleanup.qualify_static_member_accesses_with_declaring_class=true
 cleanup.qualify_static_method_accesses_with_declaring_class=false
 cleanup.remove_private_constructors=true
 cleanup.remove_redundant_modifiers=false
-cleanup.remove_redundant_semicolons=false
+cleanup.remove_redundant_semicolons=true
 cleanup.remove_redundant_type_arguments=true
 cleanup.remove_trailing_whitespaces=true
 cleanup.remove_trailing_whitespaces_all=true
diff --git a/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/workload-generator-commons/build.gradle b/benchmarks/workload-generator-commons/build.gradle
similarity index 100%
rename from workload-generator-commons/build.gradle
rename to benchmarks/workload-generator-commons/build.gradle
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java
diff --git a/docker-test/uc1-docker-compose/docker-compose.yml b/docker-test/uc1-docker-compose/docker-compose.yml
index d394255951151d931b73e4c923bb10ecaed66a2c..905e6e30bfd38900e896be45d8a4b15389b2f54f 100755
--- a/docker-test/uc1-docker-compose/docker-compose.yml
+++ b/docker-test/uc1-docker-compose/docker-compose.yml
@@ -1,26 +1,53 @@
 version: '2'
 services:
   zookeeper:
-    image: wurstmeister/zookeeper
-    ports:
-      - "2181:2181"
+    image: confluentinc/cp-zookeeper
+    expose:
+      - "9092"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
     image: wurstmeister/kafka
-    ports:
-      - "9092:9092"
     expose:
       - "9092"
+    #ports:
+    #  - 19092:19092
     environment:
-      KAFKA_ADVERTISED_HOST_NAME: kafka #172.17.0.1 # Replace with docker network
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
-      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
+  schema-registry:
+    image: confluentinc/cp-schema-registry:5.3.1
+    depends_on:
+      - zookeeper
+      - kafka
+    expose:
+      - "8081"
+    ports:
+      - 8081:8081
+    environment:
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry
+      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
   uc-app:
     image: theodolite/theodolite-uc1-kstreams-app:latest
+    depends_on:
+      - schema-registry
+      - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
   uc-wg: 
     image: theodolite/theodolite-uc1-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+      - zookeeper
     environment:
+      ZK_HOST: zookeeper
+      ZK_PORT: 2181
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      INSTANCES: 1
       NUM_SENSORS: 1
diff --git a/docker-test/uc2-docker-compose/docker-compose.yml b/docker-test/uc2-docker-compose/docker-compose.yml
index f730148a89d41a819d81a4770e0d53a960dbe493..e6511bfd9fa7ea1e62bf9f3787ac6f3c0acc0107 100755
--- a/docker-test/uc2-docker-compose/docker-compose.yml
+++ b/docker-test/uc2-docker-compose/docker-compose.yml
@@ -1,26 +1,53 @@
 version: '2'
 services:
   zookeeper:
-    image: wurstmeister/zookeeper
-    ports:
-      - "2181:2181"
+    image: confluentinc/cp-zookeeper
+    expose:
+      - "9092"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
     image: wurstmeister/kafka
-    ports:
-      - "9092:9092"
     expose:
       - "9092"
+    ports:
+      - 19092:19092
     environment:
-      KAFKA_ADVERTISED_HOST_NAME: kafka #172.17.0.1 # Replace with docker network
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
-      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
+  schema-registry:
+    image: confluentinc/cp-schema-registry:5.3.1
+    depends_on:
+      - zookeeper
+      - kafka
+    expose:
+      - "8081"
+    ports:
+      - 8081:8081
+    environment:
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry
+      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
   uc-app:
     image: theodolite/theodolite-uc2-kstreams-app:latest
+    depends_on:
+      - schema-registry
+      - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
   uc-wg: 
     image: theodolite/theodolite-uc2-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+      - zookeeper
     environment:
+      ZK_HOST: zookeeper
+      ZK_PORT: 2181
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      INSTANCES: 1
       NUM_SENSORS: 1
\ No newline at end of file
diff --git a/docker-test/uc3-docker-compose/docker-compose.yml b/docker-test/uc3-docker-compose/docker-compose.yml
index 2a3cb23a79f9edda699fe1bb07c1b922614aeb13..9d2da8e87621c1902ff101efd42ff52436416b77 100755
--- a/docker-test/uc3-docker-compose/docker-compose.yml
+++ b/docker-test/uc3-docker-compose/docker-compose.yml
@@ -1,27 +1,56 @@
 version: '2'
 services:
   zookeeper:
-    image: wurstmeister/zookeeper
+    #image: wurstmeister/zookeeper
+    image: confluentinc/cp-zookeeper
     ports:
       - "2181:2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
     image: wurstmeister/kafka
-    ports:
-      - "9092:9092"
     expose:
       - "9092"
+    ports:
+      - 19092:19092
     environment:
-      KAFKA_ADVERTISED_HOST_NAME: kafka #172.17.0.1 # Replace with docker network
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
-      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
+  schema-registry:
+    image: confluentinc/cp-schema-registry:5.3.1
+    depends_on:
+      - zookeeper
+      - kafka
+    ports:
+      - "8081:8081"
+    expose:
+      - "8081"
+    environment:
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry
+      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
   uc-app:
     image: theodolite/theodolite-uc3-kstreams-app:latest
+    depends_on:
+      - schema-registry
+      - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
       KAFKA_WINDOW_DURATION_MINUTES: 60
   uc-wg: 
     image: theodolite/theodolite-uc3-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+      - zookeeper
     environment:
+      ZK_HOST: zookeeper
+      ZK_PORT: 2181
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      INSTANCES: 1
       NUM_SENSORS: 1
\ No newline at end of file
diff --git a/docker-test/uc4-docker-compose/docker-compose.yml b/docker-test/uc4-docker-compose/docker-compose.yml
index 1f015f23b2e8b98eba27ae6f387adb123ae2ccc2..530852b2df5ef2c70f03a11ac2445ce587a3760f 100755
--- a/docker-test/uc4-docker-compose/docker-compose.yml
+++ b/docker-test/uc4-docker-compose/docker-compose.yml
@@ -1,25 +1,32 @@
 version: '2'
 services:
   zookeeper:
-    image: wurstmeister/zookeeper
+    #image: wurstmeister/zookeeper
+    image: confluentinc/cp-zookeeper
     ports:
       - "2181:2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
     image: wurstmeister/kafka
-    ports:
-      - "9092:9092"
     expose:
       - "9092"
+    ports:
+      - 19092:19092
     environment:
-      KAFKA_ADVERTISED_HOST_NAME: kafka #172.17.0.1 # Replace with docker network
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
-      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
   schema-registry:
     image: confluentinc/cp-schema-registry:5.3.1
     depends_on:
       - zookeeper
       - kafka
+    ports:
+      - "8081:8081"
     expose:
       - "8081"
     environment:
@@ -27,10 +34,22 @@ services:
       SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
   uc-app:
     image: theodolite/theodolite-uc4-kstreams-app:latest
+    depends_on:
+      - schema-registry
+      - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       SCHEMA_REGISTRY_URL: http://schema-registry:8081
   uc-wg: 
     image: theodolite/theodolite-uc4-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+      - zookeeper
     environment:
+      ZK_HOST: zookeeper
+      ZK_PORT: 2181
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      INSTANCES: 1
+      NUM_SENSORS: 100
diff --git a/execution/.dockerignore b/execution/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..68e5f21c503a80d7db64722d700351a303ddb9dd
--- /dev/null
+++ b/execution/.dockerignore
@@ -0,0 +1,9 @@
+*
+!requirements.txt
+!uc-workload-generator
+!uc-application
+!strategies
+!lib
+!theodolite.py
+!run_uc.py
+!lag_analysis.py
diff --git a/execution/.gitignore b/execution/.gitignore
index d4dceff0274cd6ab3296e85e995f7e5d504f114d..bac9a5d1eeb12d9e40d38376904e8fb69c0e5231 100644
--- a/execution/.gitignore
+++ b/execution/.gitignore
@@ -1 +1,2 @@
-exp_counter.txt
\ No newline at end of file
+exp_counter.txt
+results
diff --git a/execution/.gitlab-ci.yml b/execution/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5577de7a083708a6bb9b83571f458e2c1fbfb340
--- /dev/null
+++ b/execution/.gitlab-ci.yml
@@ -0,0 +1,61 @@
+stages:
+  - deploy
+
+deploy:
+  stage: deploy
+  tags:
+    - exec-dind
+  image: docker:19.03.1
+  services:
+    - docker:19.03.1-dind
+  variables:
+    DOCKER_TLS_CERTDIR: "/certs"
+  script:
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    - docker build --pull -t theodolite ./execution
+    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite $DOCKERHUB_ORG/theodolite:${DOCKER_TAG_NAME}latest"
+    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite $DOCKERHUB_ORG/theodolite:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
+    - "[ $CI_COMMIT_TAG ] && docker tag theodolite $DOCKERHUB_ORG/theodolite:$CI_COMMIT_TAG"
+    - echo $DOCKERHUB_PW | docker login -u $DOCKERHUB_ID --password-stdin
+    - docker push $DOCKERHUB_ORG/theodolite
+    - docker logout
+  rules:
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - execution/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW"
+      when: manual
+      allow_failure: true
+  
+deploy-ghcr:
+  stage: deploy
+  tags:
+    - exec-dind
+  image: docker:19.03.1
+  services:
+    - docker:19.03.1-dind
+  variables:
+    DOCKER_TLS_CERTDIR: "/certs"
+  script:
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    - docker build --pull -t theodolite ./execution
+    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite ghcr.io/$GITHUB_CR_ORG/theodolite:${DOCKER_TAG_NAME}latest"
+    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite ghcr.io/$GITHUB_CR_ORG/theodolite:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
+    - "[ $CI_COMMIT_TAG ] && docker tag theodolite ghcr.io/$GITHUB_CR_ORG/theodolite:$CI_COMMIT_TAG"
+    - echo $GITHUB_CR_TOKEN | docker login ghcr.io -u $GITHUB_CR_USER --password-stdin
+    - docker push ghcr.io/$GITHUB_CR_ORG/theodolite
+    - docker logout
+  rules:
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - execution/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN"
+      when: manual
+      allow_failure: true
+      
\ No newline at end of file
diff --git a/execution/Dockerfile b/execution/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..e71bc91d9d31bea4c1598292e43d0ab7c193c3fa
--- /dev/null
+++ b/execution/Dockerfile
@@ -0,0 +1,15 @@
+FROM python:3.8
+
+RUN mkdir /app
+WORKDIR /app
+ADD requirements.txt /app/
+RUN pip install -r requirements.txt
+COPY uc-workload-generator /app/uc-workload-generator
+COPY uc-application /app/uc-application
+COPY strategies /app/strategies
+COPY lib /app/lib
+COPY lag_analysis.py /app/
+COPY run_uc.py /app/
+COPY theodolite.py /app/
+
+CMD ["python", "/app/theodolite.py"]
diff --git a/execution/README.md b/execution/README.md
index a56f249a3988273d0b3aafc0a023077328249a87..358ce270400d1e4e4947a8ef736feac74c314163 100644
--- a/execution/README.md
+++ b/execution/README.md
@@ -2,30 +2,34 @@
 
 This directory contains the Theodolite framework for executing scalability
 benchmarks in a Kubernetes cluster. As Theodolite aims for executing benchmarks
-in realistic execution environments,, some third-party components are [required](#requirements).
+in realistic execution environments, some third-party components are [required](#installation).
 After everything is installed and configured, you can move on the [execution of
 benchmarks](#execution).
 
-## Requirements
+## Installation
 
-### Kubernetes Cluster
+For executing benchmarks, access to a Kubernetes cluster is required. If you already run other applications inside your
+cluster, you might want to consider creating a dedicated namespace for your benchmarks.
 
-For executing benchmarks, access to Kubernetes cluster is required. We suggest
-to create a dedicated namespace for executing our benchmarks. The following
-services need to be available as well.
+### Installing Dependencies
+
+The following third-party services need to be installed in your cluster. For most of them, we suggest installation
+via [Helm](https://helm.sh).
 
 #### Prometheus
 
 We suggest to use the [Prometheus Operator](https://github.com/coreos/prometheus-operator)
 and create a dedicated Prometheus instance for these benchmarks.
 
-If Prometheus Operator is not already available on your cluster, a convenient
-way to install is via the [**unofficial** Prometheus Operator Helm chart](https://github.com/helm/charts/tree/master/stable/prometheus-operator).
-As you may not need an entire cluster monitoring stack, you can use our Helm
-configuration to only install the operator:
+If Prometheus Operator is not already available on your cluster, a convenient way to install it is via the
+[Prometheus community Helm chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack).
+As you may not need an entire cluster monitoring stack, you can use our Helm configuration to only install the
+operator:
 
 ```sh
-helm install prometheus-operator stable/prometheus-operator -f infrastructure/prometheus/helm-values.yaml
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm repo update
+helm install prometheus-operator prometheus-community/kube-prometheus-stack -f infrastructure/prometheus/helm-values.yaml
 ```
 
 After installation, you need to create a Prometheus instance:
@@ -34,9 +38,17 @@ After installation, you need to create a Prometheus instance:
 kubectl apply -f infrastructure/prometheus/prometheus.yaml
 ```
 
-You might also need to apply the [ServiceAccount](infrastructure/prometheus/service-account.yaml), [ClusterRole](infrastructure/prometheus/cluster-role.yaml) 
-and the [CusterRoleBinding](infrastructure/prometheus/cluster-role-binding.yaml),
-depending on your cluster's security policies.
+You might also need to apply the [ClusterRole](infrastructure/prometheus/cluster-role.yaml), the
+[CusterRoleBinding](infrastructure/prometheus/cluster-role-binding.yaml) and the
+[ServiceAccount](infrastructure/prometheus/service-account.yaml), depending on your cluster's security
+policies. If you are not in the *default* namespace, alter the namespace in
+[Prometheus' ClusterRoleBinding](infrastructure/prometheus/cluster-role-binding.yaml) accordingly.
+
+```sh
+kubectl apply -f infrastructure/prometheus/cluster-role.yaml
+kubectl apply -f infrastructure/prometheus/cluster-role-binding.yaml
+kubectl apply -f infrastructure/prometheus/service-account.yaml
+```
 
 For the individual benchmarking components to be monitored, [ServiceMonitors](https://github.com/coreos/prometheus-operator#customresourcedefinitions)
 are used. See the corresponding sections below for how to install them.
@@ -47,44 +59,32 @@ As with Prometheus, we suggest to create a dedicated Grafana instance. Grafana
 with our default configuration can be installed with Helm:
 
 ```sh
-helm install grafana stable/grafana -f infrastructure/grafana/values.yaml
+helm repo add grafana https://grafana.github.io/helm-charts
+helm repo update
+helm install grafana grafana/grafana -f infrastructure/grafana/values.yaml
 ```
 
 The official [Grafana Helm Chart repository](https://github.com/helm/charts/tree/master/stable/grafana)
 provides further documentation including a table of configuration options.
 
-We provide ConfigMaps for a [Grafana dashboard](infrastructure/grafana/dashboard-config-map.yaml) and a [Grafana data source](infrastructure/grafana/prometheus-datasource-config-map.yaml).
-
-Create the Configmap for the dashboard:
+We provide ConfigMaps for a [Grafana dashboard](infrastructure/grafana/dashboard-config-map.yaml) and a
+[Grafana data source](infrastructure/grafana/prometheus-datasource-config-map.yaml). Create them as follows:
 
 ```sh
 kubectl apply -f infrastructure/grafana/dashboard-config-map.yaml
-```
-
-Create the Configmap for the data source:
-
-```sh
 kubectl apply -f infrastructure/grafana/prometheus-datasource-config-map.yaml
 ```
 
 #### A Kafka cluster
 
-One possible way to set up a Kafka cluster is via [Confluent's Helm Charts](https://github.com/confluentinc/cp-helm-charts).
-For using these Helm charts and conjuction with the Prometheus Operator (see
-below), we provide a [patch](https://github.com/SoerenHenning/cp-helm-charts)
-for these helm charts. Note that this patch is only required for observation and
-not for the actual benchmark execution and evaluation.
-
-##### Our patched Confluent Helm Charts
-
-To use our patched Confluent Helm Charts clone the
-[chart's repsoitory](https://github.com/SoerenHenning/cp-helm-charts). We also
-provide a [default configuration](infrastructure/kafka/values.yaml). If you do
+We suggest setting up a Kafka cluster via [Confluent's Helm Charts](https://github.com/confluentinc/cp-helm-charts).
+Currently, these charts do not expose all metrics we would like to display. Thus, we provide a patched version of this chart.
+We also provide a [default configuration](infrastructure/kafka/values.yaml). If you do
 not want to deploy 10 Kafka and 3 Zookeeper instances, alter the configuration
-file accordingly. To install Confluent's Kafka and use the configuration:
+file accordingly. To install the patched Confluent Kafka chart with our configuration:
 
 ```sh
-helm install my-confluent <path-to-cp-helm-charts> -f infrastructure/kafka/values.yaml
+helm install my-confluent https://github.com/SoerenHenning/cp-helm-charts/releases/download/v6.0.1-1-JMX-FIX/cp-helm-charts-0.6.0.tgz -f infrastructure/kafka/values.yaml
 ```
 
 To let Prometheus scrape Kafka metrics, deploy a ServiceMonitor:
@@ -93,10 +93,11 @@ To let Prometheus scrape Kafka metrics, deploy a ServiceMonitor:
 kubectl apply -f infrastructure/kafka/service-monitor.yaml
 ```
 
-##### Other options for Kafka
-
 Other Kafka deployments, for example, using Strimzi, should work in a similar way.
 
+*Please note that currently, even if installed differently, the corresponding services must run at
+`my-confluent-cp-kafka:9092`, `my-confluent-cp-zookeeper:2181` and `my-confluent-cp-schema-registry:8081`.*
+
 #### A Kafka Client Pod
 
 A permanently running pod used for Kafka configuration is started via:
@@ -123,44 +124,157 @@ To install it:
 helm install kafka-lag-exporter https://github.com/lightbend/kafka-lag-exporter/releases/download/v0.6.3/kafka-lag-exporter-0.6.3.tgz -f infrastructure/kafka-lag-exporter/values.yaml
 ```
 
+### Installing Theodolite
 
-### Python 3.7
+While Theodolite itself does not have to be installed, as it is loaded at runtime (see [Execution](#execution)), it
+requires some resources to be deployed in your cluster. These resources are grouped under RBAC and Volume in the
+following paragraphs.
 
-For executing benchmarks, a **Python 3.7** installation is required. We suggest
-to use a virtual environment placed in the `.venv` directory (in the Theodolite
-root directory). As set of requirements is needed. You can install them with the following
-command (make sure to be in your virtual environment if you use one):
+#### Theodolite RBAC
+
+**The following step is only required if RBAC is enabled in your cluster.** If you are not sure whether this is the
+case, you may simply try it without this step first.
+
+If RBAC is enabled in your cluster, you have to allow Theodolite to start and stop pods etc. To do so, deploy the RBAC
+resources via:
+
+```sh
+kubectl apply -f infrastructure/kubernetes/rbac/role.yaml
+kubectl apply -f infrastructure/kubernetes/rbac/role-binding.yaml
+kubectl apply -f infrastructure/kubernetes/rbac/service-account.yaml
+```
+
+#### Theodolite Volume
+
+In order to persistently store benchmark results, Theodolite needs a volume mounted. We provide pre-configured
+declarations for different volume types.
+
+##### *hostPath* volume
+
+Using a [hostPath volume](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath) is the easiest option when
+running Theodolite locally, e.g., with minikube or kind.
+
+Just modify `infrastructure/kubernetes/volume-hostpath.yaml` by setting `path` to the directory on your host machine where
+all benchmark results should be stored and run:
 
 ```sh
-pip install -r requirements.txt 
+kubectl apply -f infrastructure/kubernetes/volume-hostpath.yaml
 ```
 
+##### *local* volume
 
-### Required Manual Adjustments
+A [local volume](https://kubernetes.io/docs/concepts/storage/volumes/#local) is a simple option to use when having
+access (e.g. via SSH) to one of your cluster nodes.
 
-Depending on your setup, some additional adjustments may be necessary:
+You first need to create a directory on a selected node where all benchmark results should be stored. Next, modify
+`infrastructure/kubernetes/volume-local.yaml` by setting `<node-name>` to your selected node. (This node will most
+likely also execute the [Theodolite job](#execution).) Further, you have to set `path` to the directory you just
+created on that node. To deploy your volume, run:
 
-* Change Kafka and Zookeeper servers in the Kubernetes deployments (uc1-application etc.) and `run_XX.sh` scripts
-* Change Prometheus' URL in `lag_analysis.py`
-* Change the path to your Python 3.7 virtual environment in the `run_XX.sh` schripts (to find the venv's `bin/activate`)
-* Change the name of your Kubernetes namespace for [Prometheus' ClusterRoleBinding](infrastructure/prometheus/cluster-role-binding.yaml)
-* *Please let us know if there are further adjustments necessary*
+```sh
+kubectl apply -f infrastructure/kubernetes/volume-local.yaml
+```
+
+##### Other volumes
 
+To use volumes provided by public cloud providers or network-based file systems, you can use the definitions in
+`infrastructure/kubernetes/` as a starting point. See the official
+[volumes documentation](https://kubernetes.io/docs/concepts/storage/volumes/) for additional information.
 
 
 ## Execution
 
-The `./theodolite.sh` is the entrypoint for all benchmark executions. Is has to be called as follows:
+The preferred way to run scalability benchmarks with Theodolite is to deploy Theodolite
+[Kubernetes Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/job/) in your cluster. For running
+Theodolite locally on your machine see the description below.
+
+`theodolite.yaml` provides a template for your own Theodolite job. To run your own job, create a copy, give it a name
+(`metadata.name`) and adjust configuration parameters as desired. For a description of available configuration options
+see the [Configuration](#configuration) section below. Note that you might have to uncomment the `serviceAccountName`
+line if RBAC is enabled on your cluster (see installation of [Theodolite RBAC](#theodolite-rbac)).
+
+To start the execution of a benchmark run (with `<your-theodolite-yaml>` being your job definition):
+
+```sh
+kubectl create -f <your-theodolite-yaml>
+```
+
+This will create a pod with a name such as `your-job-name-xxxxxx`. You can verify this via `kubectl get pods`. With
+`kubectl logs -f <your-job-name-xxxxxx>`, you can follow the benchmark execution logs.
+
+Once your job is completed (you can verify via `kubectl get jobs`), its results are stored inside your configured
+Kubernetes volume.
+
+**Make sure to always run only one Theodolite job at a time.**
+
+### Configuration
+
+| Command line         | Kubernetes (env var) | Description                                                  |
+| -------------------- | ------------------- | ------------------------------------------------------------ |
+| --uc                 | UC                  | **[Mandatory]** Stream processing use case to be benchmarked. Has to be one of `1`, `2`, `3` or `4`. |
+| --loads              | LOADS               | **[Mandatory]** Values for the workload generator to be tested, should be sorted in ascending order. |
+| --instances          | INSTANCES           | **[Mandatory]** Numbers of instances to be benchmarked, should be sorted in ascending order. |
+| --duration           | DURATION            | Duration in minutes subexperiments should be executed for. *Default:* `5`. |
+| --partitions         | PARTITIONS          | Number of partitions for Kafka topics. *Default:* `40`.      |
+| --cpu-limit          | CPU_LIMIT           | Kubernetes CPU limit for a single Pod.  *Default:* `1000m`.  |
+| --memory-limit       | MEMORY_LIMIT        | Kubernetes memory limit for a single Pod. *Default:* `4Gi`.  |
+| --domain-restriction | DOMAIN_RESTRICTION  | A flag that indicates that domain restriction should be used. *Default:* not set. For more details see Section [Domain Restriction](#domain-restriction). |
+| --search-strategy    | SEARCH_STRATEGY     | The benchmarking search strategy. Can be set to `check-all`, `linear-search` or `binary-search`. *Default:* `check-all`. For more details see Section [Benchmarking Search Strategies](#benchmarking-search-strategies). |
+| --reset              | RESET               | Resets the environment before each subexperiment. Useful if execution was aborted and just one experiment should be executed. |
+| --reset-only         | RESET_ONLY          | Only resets the environment. Ignores all other parameters. Useful if execution was aborted and one wants a clean state for new executions. |
+| --prometheus         | PROMETHEUS_BASE_URL | Defines where to find the Prometheus instance. *Default:* `http://localhost:9090` |
+| --path               | RESULT_PATH         | A directory path for the results. Relative to the Execution folder. *Default:* `results` |
+| --configurations     | CONFIGURATIONS      | Defines environment variables for the use cases and, thus, enables further configuration options. |
+| --threshold          | THRESHOLD           | The threshold for the trend slope that the search strategies use to determine whether a load could be handled. *Default:* `2000` |
+
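+Whether a load could be handled is decided from the consumer lag trend: a linear regression is fitted to the recorded
+lag values and the resulting slope is compared against the configured threshold (see `--threshold` above). A minimal
+sketch of this check, assuming lag measurements as parallel timestamp and value lists (names are illustrative):
+
+```python
+import numpy as np
+from sklearn.linear_model import LinearRegression
+
+def load_handled(timestamps, lag_values, threshold=2000):
+    """Fit a linear trend to the consumer lag and compare its slope
+    against the threshold (default taken from the table above)."""
+    X = np.array(timestamps).reshape(-1, 1)  # one sample per row, one feature
+    Y = np.array(lag_values).reshape(-1, 1)
+    trend_slope = LinearRegression().fit(X, Y).coef_[0][0]
+    return trend_slope < threshold
+```
+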
+### Domain Restriction
+
+For each dimension value, we have a domain of numbers of instances. As a consequence, for each dimension value the maximum number of lag experiments is equal to the size of its domain. How the domain is determined is defined by the following domain restriction strategies (see the sketch after this list):
+
+* `no-domain-restriction`: For each dimension value, the domain of instances is equal to the set of all numbers of instances.
+* `restrict-domain`: For each dimension value, the domain is computed as follows:
+    * If the dimension value is the smallest one, the domain of numbers of instances is equal to the set of all numbers of instances.
+    * If the dimension value is not the smallest one and N is the minimal number of instances that was suitable for the next smaller dimension value, the domain for this dimension value contains all numbers of instances greater than or equal to N.
+
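+As a minimal Python sketch (function and parameter names are illustrative, not part of Theodolite), the
+`restrict-domain` computation can be expressed as:
+
+```python
+def restrict_domain(instances, min_suitable_for_previous):
+    """Compute the domain of instance counts for one dimension value.
+
+    instances: all configured numbers of instances, sorted ascending.
+    min_suitable_for_previous: minimal suitable number of instances found
+    for the next smaller dimension value, or None for the smallest one.
+    """
+    if min_suitable_for_previous is None:
+        return list(instances)  # smallest dimension value: full domain
+    # otherwise, only instance counts >= the previous minimum remain
+    return [i for i in instances if i >= min_suitable_for_previous]
+```
+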
+### Benchmarking Search Strategies
+
+There are the following benchmarking strategies (a sketch of `linear-search` follows this list):
+
+* `check-all`: For each dimension value, execute one lag experiment for every number of instances within the current domain.
+* `linear-search`: A heuristic which works as follows: For each dimension value, execute lag experiments for the numbers of instances within the current domain, ordered from the lowest to the highest. The execution for a dimension value is stopped as soon as a suitable number of instances is found or all lag experiments for this dimension value were unsuccessful.
+* `binary-search`: A heuristic which works as follows: For each dimension value, execute lag experiments for numbers of instances within the current domain in a binary-search-like manner. The execution is stopped as soon as a suitable number of instances is found or all lag experiments for this dimension value were unsuccessful.
+
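+A minimal Python sketch of `linear-search` (the callback `lag_experiment_successful` is hypothetical and stands in
+for running one lag experiment, including the trend slope check described above):
+
+```python
+def linear_search(loads, domains, lag_experiment_successful):
+    """For each load (dimension value), find the lowest suitable instance count.
+
+    loads: dimension values, sorted ascending.
+    domains: maps each load to its (possibly restricted) domain of instance counts.
+    lag_experiment_successful: hypothetical callback executing one lag
+    experiment and returning True if the load could be handled.
+    """
+    results = {}
+    for load in loads:
+        results[load] = None  # None: no suitable number of instances found
+        for instances in domains[load]:
+            if lag_experiment_successful(load, instances):
+                results[load] = instances
+                break  # stop at the first (lowest) suitable instance count
+    return results
+```
+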
+## Observation
+
+The installed Grafana instance provides a dashboard to observe the benchmark execution. Unless configured otherwise,
+this dashboard can be accessed via `http://<cluster-ip>:31199` or via `http://localhost:8080` if proxied with
+`kubectl port-forward svc/grafana 8080:service`. Default credentials are user *admin* with password *admin*.
+
+
+## Local Execution (e.g. for Development)
+
+As an alternative to executing Theodolite as a Kubernetes Job, it is also possible to run it from your local system,
+for example, for development purposes. In addition to the general installation instructions, the following adjustments
+are necessary.
+
+### Installation
+
+For local execution a **Python 3.7** installation is required. We suggest using a virtual environment placed in the
+`.venv` directory (in the Theodolite root directory). The required packages can be installed with the following
+command (make sure to be in your virtual environment if you use one):
 
 ```sh
-./theodolite.sh <use-case> <wl-values> <instances> <partitions> <cpu-limit> <memory-limit> <commit-interval> <duration>
+pip install -r requirements.txt
+```
+
+Kubernetes volumes and service accounts, roles, and role bindings for Theodolite are not required in this case.
+
+### Local Execution
+
+The `theodolite.py` script is the entry point for all benchmark executions. It has to be called as follows:
+
+```sh
+python theodolite.py --uc <uc> --loads <load> [<load> ...] --instances <instances> [<instances> ...]
 ```
 
-* `<use-case>`: Stream processing use case to be benchmarked. Has to be one of `1`, `2`, `3` or `4`.
-* `<wl-values>`: Values for the workload generator to be tested, separated by commas and quoted. For example `"100000, 200000, 300000"`.
-* `<instances>`: Numbers of instances to be benchmarked, separated by commas and quoted. For example `"1, 2, 3, 4"`.
-* `<partitions>`: Number of partitions for Kafka topics. Optional. Default `40`.
-* `<cpu-limit>`: Kubernetes CPU limit. Optional. Default `1000m`.
-* `<memory-limit>`: Kubernetes memory limit. Optional. Default `4Gi`.
-* `<commit-interval>`: Kafka Streams' commit interval in milliseconds. Optional. Default `100`.
-* `<duration>`: Duration in minutes subexperiments should be executed for. Optional. Default `5`.
+This is the minimal command for an execution. Further configuration options are described [above](#configuration)
+or available via `python theodolite.py -h`.
\ No newline at end of file
diff --git a/execution/infrastructure/grafana/values.yaml b/execution/infrastructure/grafana/values.yaml
index 211a72a61a2699c7108ec4adb9a7edebbccecb69..562516ad76f9a0f88c0db8557da51178dbbc9871 100644
--- a/execution/infrastructure/grafana/values.yaml
+++ b/execution/infrastructure/grafana/values.yaml
@@ -11,7 +11,9 @@ adminPassword: admin
 ## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders
 ## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
 sidecar:
-  image: kiwigrid/k8s-sidecar:0.1.99
+  image:
+    repository: "kiwigrid/k8s-sidecar"
+    tag: "1.1.0"
   imagePullPolicy: IfNotPresent
   dashboards:
     enabled: true
diff --git a/execution/infrastructure/kafka-lag-exporter/values.yaml b/execution/infrastructure/kafka-lag-exporter/values.yaml
index b83a911283a7e8264f982f9eb5d550ad5497ec9d..8e53454345df75b55d5d36799dd0b0f0f75233a0 100644
--- a/execution/infrastructure/kafka-lag-exporter/values.yaml
+++ b/execution/infrastructure/kafka-lag-exporter/values.yaml
@@ -1,3 +1,6 @@
+image:
+  pullPolicy: IfNotPresent
+
 clusters:
   - name: "my-confluent-cp-kafka"
     bootstrapBrokers: "my-confluent-cp-kafka:9092"
diff --git a/execution/infrastructure/kafka/values.yaml b/execution/infrastructure/kafka/values.yaml
index 51dcb09d5f24579b148811c8a1c27fe165c3fb56..e65a5fc567d39c7389479d406fa9e6d7156b0f0a 100644
--- a/execution/infrastructure/kafka/values.yaml
+++ b/execution/infrastructure/kafka/values.yaml
@@ -53,8 +53,9 @@ cp-kafka:
     "replica.fetch.max.bytes": "134217728" # 128 MB
     # "default.replication.factor": 3
     # "min.insync.replicas": 2
-    # "auto.create.topics.enable": false
+    "auto.create.topics.enable": false
     "log.retention.ms": "10000" # 10s
+    #"log.retention.ms": "86400000" # 24h
     "metrics.sample.window.ms": "5000" #5s
 
 ## ------------------------------------------------------
diff --git a/execution/infrastructure/kubernetes/rbac/role-binding.yaml b/execution/infrastructure/kubernetes/rbac/role-binding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ef2d0c015a1b42880f9652bc241950548a952792
--- /dev/null
+++ b/execution/infrastructure/kubernetes/rbac/role-binding.yaml
@@ -0,0 +1,11 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: theodolite
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: theodolite
+subjects:
+- kind: ServiceAccount
+  name: theodolite
\ No newline at end of file
diff --git a/execution/infrastructure/kubernetes/rbac/role.yaml b/execution/infrastructure/kubernetes/rbac/role.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..84ba14a8bc7a6eceb8a20596ede057ca2271b967
--- /dev/null
+++ b/execution/infrastructure/kubernetes/rbac/role.yaml
@@ -0,0 +1,41 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: theodolite
+rules:
+  - apiGroups:
+    - apps
+    resources:
+    - deployments
+    verbs:
+    - delete
+    - list
+    - get
+    - create
+  - apiGroups:
+    - ""
+    resources:
+    - services
+    - pods
+    - servicemonitors
+    - configmaps
+    verbs:
+    - delete
+    - list
+    - get
+    - create
+  - apiGroups:
+    - ""
+    resources:
+    - pods/exec
+    verbs:
+    - create
+    - get
+  - apiGroups:
+    - monitoring.coreos.com
+    resources:
+    - servicemonitors
+    verbs:
+    - delete
+    - list
+    - create
\ No newline at end of file
diff --git a/execution/infrastructure/kubernetes/rbac/service-account.yaml b/execution/infrastructure/kubernetes/rbac/service-account.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c7f33076e31ac53d02491c80fd61cdc5b241dfd7
--- /dev/null
+++ b/execution/infrastructure/kubernetes/rbac/service-account.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: theodolite
\ No newline at end of file
diff --git a/execution/infrastructure/kubernetes/volume-hostpath.yaml b/execution/infrastructure/kubernetes/volume-hostpath.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b0a132dd1922652e52daa0f691a6014a9b8ec1a8
--- /dev/null
+++ b/execution/infrastructure/kubernetes/volume-hostpath.yaml
@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: theodolite-pv-volume
+  labels:
+    type: local
+spec:
+  storageClassName: theodolite
+  capacity:
+    storage: 100m
+  accessModes:
+    - ReadWriteOnce
+  hostPath:
+    path: </your/path/to/results/folder>
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: theodolite-pv-claim
+spec:
+  storageClassName: theodolite
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 100m
diff --git a/execution/infrastructure/kubernetes/volume-local.yaml b/execution/infrastructure/kubernetes/volume-local.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..78c1501afedccf03d3f415c928010dbb5d131c70
--- /dev/null
+++ b/execution/infrastructure/kubernetes/volume-local.yaml
@@ -0,0 +1,42 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: theodolite-pv-volume
+spec:
+  capacity:
+    storage: 1Gi
+  volumeMode: Filesystem
+  accessModes:
+  - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Delete
+  storageClassName: local-storage
+  local:
+    path: </your/path/to/results/folder>
+  nodeAffinity:
+    required:
+      nodeSelectorTerms:
+      - matchExpressions:
+        - key: kubernetes.io/hostname
+          operator: In
+          values:
+          - <node-name>
+---
+# https://kubernetes.io/docs/concepts/storage/storage-classes/#local
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: local-storage
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: theodolite-pv-claim
+spec:
+  storageClassName: local-storage
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/execution/lag_analysis.py b/execution/lag_analysis.py
index 23e3d5f6c9552814f5301cd81e517f49d044cd33..5b78ef3653753a2b95ac9b74bf8de156a71fb14c 100644
--- a/execution/lag_analysis.py
+++ b/execution/lag_analysis.py
@@ -5,151 +5,163 @@ from datetime import datetime, timedelta, timezone
 import pandas as pd
 import matplotlib.pyplot as plt
 import csv
+import logging
 
-#
-exp_id =  sys.argv[1]
-benchmark = sys.argv[2]
-dim_value = sys.argv[3]
-instances = sys.argv[4]
-execution_minutes = int(sys.argv[5])
-time_diff_ms = int(os.getenv('CLOCK_DIFF_MS', 0))
 
-#http://localhost:9090/api/v1/query_range?query=sum%20by(job,topic)(kafka_consumer_consumer_fetch_manager_metrics_records_lag)&start=2015-07-01T20:10:30.781Z&end=2020-07-01T20:11:00.781Z&step=15s
+def main(exp_id, benchmark, dim_value, instances, execution_minutes, prometheus_base_url, result_path):
+    print("Main")
+    time_diff_ms = int(os.getenv('CLOCK_DIFF_MS', 0))
 
-now_local = datetime.utcnow().replace(tzinfo=timezone.utc).replace(microsecond=0)
-now = now_local - timedelta(milliseconds=time_diff_ms)
-print(f"Now Local: {now_local}")
-print(f"Now Used: {now}")
+    now_local = datetime.utcnow().replace(tzinfo=timezone.utc).replace(microsecond=0)
+    now = now_local - timedelta(milliseconds=time_diff_ms)
+    print(f"Now Local: {now_local}")
+    print(f"Now Used: {now}")
 
-end = now
-start = now - timedelta(minutes=execution_minutes)
+    end = now
+    start = now - timedelta(minutes=execution_minutes)
 
-#print(start.isoformat().replace('+00:00', 'Z'))
-#print(end.isoformat().replace('+00:00', 'Z'))
+    #print(start.isoformat().replace('+00:00', 'Z'))
+    #print(end.isoformat().replace('+00:00', 'Z'))
 
-response = requests.get('http://kube1.se.internal:32529/api/v1/query_range', params={
-    #'query': "sum by(job,topic)(kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
-    'query': "sum by(group, topic)(kafka_consumergroup_group_lag > 0)",
-    'start': start.isoformat(),
-    'end': end.isoformat(),
-    'step': '5s'})
+    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
+        # 'query': "sum by(job,topic)(kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
+        'query': "sum by(group, topic)(kafka_consumergroup_group_lag > 0)",
+        'start': start.isoformat(),
+        'end': end.isoformat(),
+        'step': '5s'})
+    # response
+    # print(response.request.path_url)
+    # response.content
+    results = response.json()['data']['result']
 
-#response
-#print(response.request.path_url)
-#response.content
-results = response.json()['data']['result']
+    d = []
 
-d = []
+    for result in results:
+        # print(result['metric']['topic'])
+        topic = result['metric']['topic']
+        for value in result['values']:
+            # print(value)
+            d.append({'topic': topic, 'timestamp': int(
+                value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
 
-for result in results:
-    #print(result['metric']['topic'])
-    topic = result['metric']['topic']
-    for value in result['values']:
-        #print(value)
-        d.append({'topic': topic, 'timestamp': int(value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
+    df = pd.DataFrame(d)
 
-df = pd.DataFrame(d)
+    # Do some analysis
 
-# Do some analysis
+    input = df.loc[df['topic'] == "input"]
 
-input = df.loc[df['topic'] == "input"]
+    # input.plot(kind='line',x='timestamp',y='value',color='red')
+    # plt.show()
 
-#input.plot(kind='line',x='timestamp',y='value',color='red')
-#plt.show()
+    from sklearn.linear_model import LinearRegression
 
-from sklearn.linear_model import LinearRegression
+    # .values converts the column into a numpy array
+    X = input.iloc[:, 1].values.reshape(-1, 1)
+    # -1 lets numpy infer the number of rows; one column
+    Y = input.iloc[:, 2].values.reshape(-1, 1)
+    linear_regressor = LinearRegression()  # create object for the class
+    linear_regressor.fit(X, Y)  # perform linear regression
+    Y_pred = linear_regressor.predict(X)  # make predictions
 
-X = input.iloc[:, 1].values.reshape(-1, 1)  # values converts it into a numpy array
-Y = input.iloc[:, 2].values.reshape(-1, 1)  # -1 means that calculate the dimension of rows, but have 1 column
-linear_regressor = LinearRegression()  # create object for the class
-linear_regressor.fit(X, Y)  # perform linear regression
-Y_pred = linear_regressor.predict(X)  # make predictions
+    print(linear_regressor.coef_)
 
-print(linear_regressor.coef_)
+    # print(Y_pred)
 
-#print(Y_pred)
+    fields = [exp_id, datetime.now(), benchmark, dim_value,
+              instances, linear_regressor.coef_]
+    print(fields)
+    with open(f'{result_path}/results.csv', 'a') as f:
+        writer = csv.writer(f)
+        writer.writerow(fields)
 
-fields=[exp_id, datetime.now(), benchmark, dim_value, instances, linear_regressor.coef_]
-print(fields)
-with open(r'results.csv', 'a') as f:
-    writer = csv.writer(f)
-    writer.writerow(fields)
+    filename = f"{result_path}/exp{exp_id}_{benchmark}_{dim_value}_{instances}"
 
-filename = f"exp{exp_id}_{benchmark}_{dim_value}_{instances}"
+    plt.plot(X, Y)
+    plt.plot(X, Y_pred, color='red')
 
-plt.plot(X, Y)
-plt.plot(X, Y_pred, color='red')
+    plt.savefig(f"{filename}_plot.png")
 
-plt.savefig(f"{filename}_plot.png")
+    df.to_csv(f"{filename}_values.csv")
 
-df.to_csv(f"{filename}_values.csv")
+    # Load total lag count
 
+    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
+        'query': "sum by(group)(kafka_consumergroup_group_lag > 0)",
+        'start': start.isoformat(),
+        'end': end.isoformat(),
+        'step': '5s'})
 
-# Load total lag count
+    results = response.json()['data']['result']
 
-response = requests.get('http://kube1.se.internal:32529/api/v1/query_range', params={
-    'query': "sum by(group)(kafka_consumergroup_group_lag > 0)",
-    'start': start.isoformat(),
-    'end': end.isoformat(),
-    'step': '5s'})
+    d = []
 
-results = response.json()['data']['result']
+    for result in results:
+        # print(result['metric']['topic'])
+        group = result['metric']['group']
+        for value in result['values']:
+            # print(value)
+            d.append({'group': group, 'timestamp': int(
+                value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
 
-d = []
+    df = pd.DataFrame(d)
 
-for result in results:
-    #print(result['metric']['topic'])
-    group = result['metric']['group']
-    for value in result['values']:
-        #print(value)
-        d.append({'group': group, 'timestamp': int(value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
+    df.to_csv(f"{filename}_totallag.csv")
 
-df = pd.DataFrame(d)
+    # Load partition count
 
-df.to_csv(f"{filename}_totallag.csv")
+    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
+        'query': "count by(group,topic)(kafka_consumergroup_group_offset > 0)",
+        'start': start.isoformat(),
+        'end': end.isoformat(),
+        'step': '5s'})
 
+    results = response.json()['data']['result']
 
-# Load partition count
+    d = []
 
-response = requests.get('http://kube1.se.internal:32529/api/v1/query_range', params={
-    'query': "count by(group,topic)(kafka_consumergroup_group_offset > 0)",
-    'start': start.isoformat(),
-    'end': end.isoformat(),
-    'step': '5s'})
+    for result in results:
+        # print(result['metric']['topic'])
+        topic = result['metric']['topic']
+        for value in result['values']:
+            # print(value)
+            d.append({'topic': topic, 'timestamp': int(
+                value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
 
-results = response.json()['data']['result']
+    df = pd.DataFrame(d)
 
-d = []
+    df.to_csv(f"{filename}_partitions.csv")
 
-for result in results:
-    #print(result['metric']['topic'])
-    topic = result['metric']['topic']
-    for value in result['values']:
-        #print(value)
-        d.append({'topic': topic, 'timestamp': int(value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
+    # Load instances count
 
-df = pd.DataFrame(d)
+    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
+        'query': "count(count (kafka_consumer_consumer_fetch_manager_metrics_records_lag) by(pod))",
+        'start': start.isoformat(),
+        'end': end.isoformat(),
+        'step': '5s'})
 
-df.to_csv(f"{filename}_partitions.csv")
+    results = response.json()['data']['result']
 
+    d = []
 
-# Load instances count
+    for result in results:
+        for value in result['values']:
+            # print(value)
+            d.append({'timestamp': int(value[0]), 'value': int(value[1])})
 
-response = requests.get('http://kube1.se.internal:32529/api/v1/query_range', params={
-    'query': "count(count (kafka_consumer_consumer_fetch_manager_metrics_records_lag) by(pod))",
-    'start': start.isoformat(),
-    'end': end.isoformat(),
-    'step': '5s'})
+    df = pd.DataFrame(d)
 
-results = response.json()['data']['result']
+    df.to_csv(f"{filename}_instances.csv")
 
-d = []
 
-for result in results:
-    for value in result['values']:
-        #print(value)
-        d.append({'timestamp': int(value[0]), 'value': int(value[1])})
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.INFO)
 
-df = pd.DataFrame(d)
+    # Load arguments
+    exp_id = sys.argv[1]
+    benchmark = sys.argv[2]
+    dim_value = sys.argv[3]
+    instances = sys.argv[4]
+    execution_minutes = int(sys.argv[5])
 
-df.to_csv(f"{filename}_instances.csv")
\ No newline at end of file
+    main(exp_id, benchmark, dim_value, instances, execution_minutes,
+        'http://localhost:9090', 'results')
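
Refactoring the script into a `main` function makes the analysis reusable from `run_uc.py`. The core pattern, repeated for each metric above, is: run a Prometheus range query, flatten the returned series into a DataFrame, and (for the lag trend) fit a linear regression whose slope estimates lag growth. A minimal sketch of that pattern, assuming only a reachable Prometheus instance (query and URL scheme are the ones used above; the function name is illustrative):

```python
import requests
import pandas as pd
from datetime import datetime, timedelta, timezone
from sklearn.linear_model import LinearRegression

def lag_trend_slope(prometheus_base_url, execution_minutes):
    # Query the consumer group lag for the execution window
    end = datetime.utcnow().replace(tzinfo=timezone.utc, microsecond=0)
    start = end - timedelta(minutes=execution_minutes)
    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
        'query': "sum by(group, topic)(kafka_consumergroup_group_lag > 0)",
        'start': start.isoformat(),
        'end': end.isoformat(),
        'step': '5s'})
    # Flatten the per-series value lists into rows
    rows = [{'topic': r['metric']['topic'],
             'timestamp': int(v[0]),
             'value': int(v[1]) if v[1] != 'NaN' else 0}
            for r in response.json()['data']['result'] for v in r['values']]
    df = pd.DataFrame(rows)
    records = df.loc[df['topic'] == 'input']
    # Regress lag over time; the slope approximates lag growth per second
    X = records['timestamp'].values.reshape(-1, 1)
    Y = records['value'].values.reshape(-1, 1)
    return LinearRegression().fit(X, Y).coef_[0][0]
```
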
diff --git a/application-kafkastreams-commons/src/test/java/.gitkeep b/execution/lib/__init__.py
similarity index 100%
rename from application-kafkastreams-commons/src/test/java/.gitkeep
rename to execution/lib/__init__.py
diff --git a/execution/lib/cli_parser.py b/execution/lib/cli_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..de609bc55e21e9467a2b28168be6e478171cfddd
--- /dev/null
+++ b/execution/lib/cli_parser.py
@@ -0,0 +1,167 @@
+import argparse
+import os
+
+
+def env_list_default(env, tf):
+    """
+    Makes a list from an environment string.
+    """
+    v = os.environ.get(env)
+    if v is not None:
+        v = [tf(s) for s in v.split(',')]
+    return v
+
+
+def key_values_to_dict(kvs):
+    """
+    Given a list with key values in form `Key=Value` it creates a dict from it.
+    """
+    my_dict = {}
+    for kv in kvs:
+        k, v = kv.split("=")
+        my_dict[k] = v
+    return my_dict
+
+
+def env_dict_default(env):
+    """
+    Makes a dict from an environment string.
+    """
+    v = os.environ.get(env)
+    if v is not None:
+        return key_values_to_dict(v.split(','))
+    else:
+        return dict()
+
+
+class StoreDictKeyPair(argparse.Action):
+    def __init__(self, option_strings, dest, nargs=None, **kwargs):
+        self._nargs = nargs
+        super(StoreDictKeyPair, self).__init__(
+            option_strings, dest, nargs=nargs, **kwargs)
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        my_dict = key_values_to_dict(values)
+        setattr(namespace, self.dest, my_dict)
+
+
+def default_parser(description):
+    """
+    Returns the default parser that can be used for the theodolite and run_uc scripts
+    :param description: The description the argument parser should show.
+    """
+    parser = argparse.ArgumentParser(description=description)
+    parser.add_argument('--uc',
+                        metavar='<uc>',
+                        default=os.environ.get('UC'),
+                        help='[mandatory] use case number, one of 1, 2, 3 or 4')
+    parser.add_argument('--partitions', '-p',
+                        metavar='<partitions>',
+                        type=int,
+                        default=os.environ.get('PARTITIONS', 40),
+                        help='Number of partitions for Kafka topics')
+    parser.add_argument('--cpu-limit', '-cpu',
+                        metavar='<CPU limit>',
+                        default=os.environ.get('CPU_LIMIT', '1000m'),
+                        help='Kubernetes CPU limit')
+    parser.add_argument('--memory-limit', '-mem',
+                        metavar='<memory limit>',
+                        default=os.environ.get('MEMORY_LIMIT', '4Gi'),
+                        help='Kubernetes memory limit')
+    parser.add_argument('--duration', '-d',
+                        metavar='<duration>',
+                        type=int,
+                        default=os.environ.get('DURATION', 5),
+                        help='Duration in minutes subexperiments should be \
+                                executed for')
+    parser.add_argument('--namespace',
+                        metavar='<NS>',
+                        default=os.environ.get('NAMESPACE', 'default'),
+                        help='Defines the Kubernetes namespace where the applications should run')
+    parser.add_argument('--reset',
+                        action="store_true",
+                        default=os.environ.get(
+                            'RESET', 'false').lower() == 'true',
+                        help='Resets the environment before execution')
+    parser.add_argument('--reset-only',
+                        action="store_true",
+                        default=os.environ.get(
+                            'RESET_ONLY', 'false').lower() == 'true',
+                        help='Only resets the environment. Ignores all other parameters')
+    parser.add_argument('--prometheus',
+                        metavar='<URL>',
+                        default=os.environ.get(
+                            'PROMETHEUS_BASE_URL', 'http://localhost:9090'),
+                        help='Defines where to find the Prometheus instance')
+    parser.add_argument('--path',
+                        metavar='<path>',
+                        default=os.environ.get('RESULT_PATH', 'results'),
+                        help='A directory path for the results')
+    parser.add_argument("--configurations",
+                        metavar="KEY=VAL",
+                        dest="configurations",
+                        action=StoreDictKeyPair,
+                        nargs="+",
+                        default=env_dict_default('CONFIGURATIONS'),
+                        help='Defines the environment variables for the UC')
+    return parser
+
+
+def benchmark_parser(description):
+    """
+    Parser for the overall benchmark execution
+    :param description: The description the argument parser should show.
+    """
+    parser = default_parser(description)
+
+    parser.add_argument('--loads',
+                        metavar='<load>',
+                        type=int,
+                        nargs='+',
+                        default=env_list_default('LOADS', int),
+                        help='[mandatory] Loads that should be executed')
+    parser.add_argument('--instances', '-i',
+                        dest='instances_list',
+                        metavar='<instances>',
+                        type=int,
+                        nargs='+',
+                        default=env_list_default('INSTANCES', int),
+                        help='[mandatory] List of instances used in benchmarks')
+    parser.add_argument('--domain-restriction',
+                        action="store_true",
+                        default=os.environ.get(
+                            'DOMAIN_RESTRICTION', 'false').lower() == 'true',
+                        help='To use domain restriction. For details see README')
+    parser.add_argument('--search-strategy',
+                        metavar='<strategy>',
+                        default=os.environ.get('SEARCH_STRATEGY', 'default'),
+                        help='The benchmarking search strategy. Can be set to default, linear-search or binary-search')
+    parser.add_argument('--threshold',
+                        type=int,
+                        metavar='<threshold>',
+                        default=os.environ.get('THRESHOLD', 2000),
+                        help='The threshold for the trend slope that the search strategies use to determine whether a load can be handled')
+    return parser
+
+
+def execution_parser(description):
+    """
+    Parser for executing one use case
+    :param description: The description the argument parser should show.
+    """
+    parser = default_parser(description)
+    parser.add_argument('--exp-id',
+                        metavar='<exp id>',
+                        default=os.environ.get('EXP_ID'),
+                        help='[mandatory] ID of the experiment')
+    parser.add_argument('--load',
+                        metavar='<load>',
+                        type=int,
+                        default=os.environ.get('LOAD'),
+                        help='[mandatory] Load that should be used for the benchmark')
+    parser.add_argument('--instances',
+                        metavar='<instances>',
+                        type=int,
+                        default=os.environ.get('INSTANCES'),
+                        help='[mandatory] Number of instances to be benchmarked')
+    return parser
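
Because every option falls back to an environment variable, the parsers support both CLI and container-style configuration. A usage sketch for `benchmark_parser` (all argument values are hypothetical):

```python
from lib.cli_parser import benchmark_parser

parser = benchmark_parser('Theodolite benchmark execution')
args = parser.parse_args([
    '--uc', '1',
    '--loads', '100000', '200000',
    '--instances', '1', '2', '4',
    '--search-strategy', 'binary-search',
    '--configurations', 'COMMIT_INTERVAL_MS=100',
])
print(args.loads)           # [100000, 200000]
print(args.instances_list)  # [1, 2, 4]
print(args.configurations)  # {'COMMIT_INTERVAL_MS': '100'}
```
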
diff --git a/execution/lib/trend_slope_computer.py b/execution/lib/trend_slope_computer.py
new file mode 100644
index 0000000000000000000000000000000000000000..90ae26cfd275f53307e19532f047e5e0a9326d3a
--- /dev/null
+++ b/execution/lib/trend_slope_computer.py
@@ -0,0 +1,19 @@
+from sklearn.linear_model import LinearRegression
+import pandas as pd
+import os
+
+def compute(directory, filename, warmup_sec):
+    df = pd.read_csv(os.path.join(directory, filename))
+    input = df
+    input['sec_start'] = input.loc[0:, 'timestamp'] - input.iloc[0]['timestamp']
+    regress = input.loc[input['sec_start'] >= warmup_sec] # Warm-Up
+
+    X = regress.iloc[:, 2].values.reshape(-1, 1)  # .values converts the column into a numpy array
+    Y = regress.iloc[:, 3].values.reshape(-1, 1)  # -1 lets numpy infer the number of rows; one column
+    linear_regressor = LinearRegression()  # create object for the class
+    linear_regressor.fit(X, Y)  # perform linear regression
+    Y_pred = linear_regressor.predict(X)  # make predictions
+
+    trend_slope = linear_regressor.coef_[0][0]
+
+    return trend_slope
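
A usage sketch, assuming the working directory is `execution/` and a `_totallag.csv` file as written by `lag_analysis.py` (the file name and the 60 s warm-up are arbitrary examples):

```python
import lib.trend_slope_computer as trend_slope_computer

WARMUP_SEC = 60  # assumption: discard the first minute as warm-up
slope = trend_slope_computer.compute(
    'results', 'exp0_uc1_100000_1_totallag.csv', WARMUP_SEC)
print(f'lag trend slope: {slope:.2f} records/s')
```
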
diff --git a/execution/requirements.txt b/execution/requirements.txt
index 7224efe80aa1686bb3de90b2beac5df47a56ed8f..18a06882007eebf69bf3bf4f84b869454b36a0a6 100644
--- a/execution/requirements.txt
+++ b/execution/requirements.txt
@@ -1,4 +1,8 @@
 matplotlib==3.2.0
 pandas==1.0.1
 requests==2.23.0
-scikit-learn==0.22.2.post1
\ No newline at end of file
+scikit-learn==0.22.2.post1
+
+# For run_uc.py
+kubernetes==11.0.0
+confuse==1.1.0
diff --git a/execution/run_uc.py b/execution/run_uc.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0fcdbb6d57e5dc67d18e69b7d07fcdbfa809307
--- /dev/null
+++ b/execution/run_uc.py
@@ -0,0 +1,592 @@
+import argparse  # parse arguments from cli
+import atexit  # used to clear resources at exit of program (e.g. ctrl-c)
+from kubernetes import client, config  # kubernetes api
+from kubernetes.stream import stream
+import lag_analysis
+import logging  # logging
+from os import path, environ  # path utilities
+from lib.cli_parser import execution_parser
+import subprocess  # execute bash commands
+import sys  # for exit of program
+import time  # process sleep
+import yaml  # convert from file to yaml object
+
+coreApi = None  # access kubernetes core api
+appsApi = None  # access kubernetes apps api
+customApi = None  # access kubernetes custom object api
+
+namespace = None
+
+
+def load_variables():
+    """Load the CLI variables given at the command line"""
+    print('Load CLI variables')
+    parser = execution_parser(description='Run use case program')
+    args = parser.parse_args()
+    print(args)
+    if (args.exp_id is None or args.uc is None or args.load is None or args.instances is None) and not args.reset_only:
+        print('The options --exp-id, --uc, --load and --instances are mandatory.')
+        print('At least one of them is not set.')
+        sys.exit(1)
+    return args
+
+
+def initialize_kubernetes_api():
+    """Load the kubernetes config from local or the cluster and creates
+    needed APIs.
+    """
+    global coreApi, appsApi, customApi
+    print('Connect to kubernetes api')
+    try:
+        config.load_kube_config()  # try using local config
+    except config.config_exception.ConfigException as e:
+        # load config from pod, if local config is not available
+        logging.debug(
+            'Failed loading local Kubernetes configuration, trying in-cluster configuration')
+        logging.debug(e)
+        config.load_incluster_config()
+
+    coreApi = client.CoreV1Api()
+    appsApi = client.AppsV1Api()
+    customApi = client.CustomObjectsApi()
+
+
+def create_topics(topics):
+    """Create the topics needed for the use cases
+    :param topics: List of topics that should be created.
+    """
+    # Calling exec and waiting for response
+    print('Create topics')
+    for (topic, partitions) in topics:
+        print(f'Create topic {topic} with {partitions} partitions')
+        exec_command = [
+            '/bin/sh',
+            '-c',
+            f'kafka-topics --zookeeper my-confluent-cp-zookeeper:2181\
+            --create --topic {topic} --partitions {partitions}\
+            --replication-factor 1'
+        ]
+        resp = stream(coreApi.connect_get_namespaced_pod_exec,
+                      "kafka-client",
+                      namespace,
+                      command=exec_command,
+                      stderr=True, stdin=False,
+                      stdout=True, tty=False)
+        print(resp)
+
+
+def load_yaml(file_path):
+    """Creates a yaml file from the file at given path.
+    :param file_path: The path to the file which contains the yaml.
+    :return: The file as a yaml object.
+    """
+    try:
+        f = open(path.join(path.dirname(__file__), file_path))
+        with f:
+            return yaml.safe_load(f)
+    except Exception as e:
+        logging.error('Error opening file %s', file_path)
+        logging.error(e)
+
+
+def load_yaml_files():
+    """Load the needed yaml files and creates objects from them.
+    :return: wg, app_svc, app_svc_monitor ,app_jmx, app_deploy
+    """
+    print('Load kubernetes yaml files')
+    wg = load_yaml('uc-workload-generator/base/workloadGenerator.yaml')
+    app_svc = load_yaml('uc-application/base/aggregation-service.yaml')
+    app_svc_monitor = load_yaml('uc-application/base/service-monitor.yaml')
+    app_jmx = load_yaml('uc-application/base/jmx-configmap.yaml')
+    app_deploy = load_yaml('uc-application/base/aggregation-deployment.yaml')
+
+    print('Kubernetes yaml files loaded')
+    return wg, app_svc, app_svc_monitor, app_jmx, app_deploy
+
+
+def replace_env_value(container, key, value):
+    """
+    Helper method to replace the value of a given environment variable
+    in a container's env list.
+    """
+    next(filter(lambda x: x['name'] == key, container))[
+        'value'] = value
+
+
+def start_workload_generator(wg_yaml, dim_value, uc_id):
+    """Starts the workload generator.
+    :param wg_yaml: The yaml object for the workload generator.
+    :param string dim_value: The dimension value the load generator should use.
+    :param string uc_id: Use case id for which load should be generated.
+    :return:
+        The Deployment created by the API, or the YAML object in case it
+        already exists or an error occurred.
+    """
+    print('Start workload generator')
+
+    num_sensors = dim_value
+    wl_max_records = 150000
+    wl_instances = (num_sensors + wl_max_records - 1) // wl_max_records
+
+    # set parameters specific to uc2
+    if uc_id == '2':
+        print('Use uc2 specific parameters')
+        num_nested_groups = dim_value
+        num_sensors = 4
+        approx_num_sensors = num_sensors ** num_nested_groups
+        wl_instances = (approx_num_sensors +
+                        wl_max_records - 1) // wl_max_records
+
+    # Customize workload generator creation
+    wg_yaml['spec']['replicas'] = wl_instances
+    # Set used use case
+    wg_container = next(filter(
+        lambda x: x['name'] == 'workload-generator', wg_yaml['spec']['template']['spec']['containers']))
+    wg_container['image'] = 'ghcr.io/cau-se/theodolite-uc' + uc_id + \
+        '-workload-generator:latest'
+    # Set environment variables
+
+    replace_env_value(wg_container['env'], 'NUM_SENSORS', str(num_sensors))
+    replace_env_value(wg_container['env'], 'INSTANCES', str(wl_instances))
+
+    if uc_id == '2':  # Special configuration for uc2
+        replace_env_value(
+            wg_container['env'], 'NUM_NESTED_GROUPS', str(num_nested_groups))
+
+    try:
+        wg_ss = appsApi.create_namespaced_deployment(
+            namespace=namespace,
+            body=wg_yaml
+        )
+        print(f'Deployment {wg_ss.metadata.name} created.')
+        return wg_ss
+    except client.rest.ApiException as e:
+        print(f'Deployment creation error: {e.reason}')
+        return wg_yaml
+
+
+def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml,
+                      instances, uc_id, memory_limit, cpu_limit,
+                      configurations):
+    """Applies the service, service monitor, jmx config map and start the
+    use case application.
+
+    :param svc_yaml: The yaml object for the service.
+    :param svc_monitor_yaml: The yaml object for the service monitor.
+    :param jmx_yaml: The yaml object for the jmx config map.
+    :param deploy_yaml: The yaml object for the application.
+    :param int instances: Number of instances for use case application.
+    :param string uc_id: The id of the use case to execute.
+    :param string memory_limit: The memory limit for the application.
+    :param string cpu_limit: The CPU limit for the application.
+    :param dict configurations: A dictionary with ENV variables for configurations.
+    :return:
+        The Service, ServiceMonitor, JMX ConfigMap and Deployment as the
+        tuple (svc, svc_monitor, jmx_cm, app_deploy). In case a resource
+        already exists or an error occurred, the YAML object is returned instead.
+    """
+    print('Start use case application')
+    svc, svc_monitor, jmx_cm, app_deploy = None, None, None, None
+
+    # Create Service
+    try:
+        svc = coreApi.create_namespaced_service(
+            namespace=namespace, body=svc_yaml)
+        print(f'Service {svc.metadata.name} created.')
+    except client.rest.ApiException as e:
+        svc = svc_yaml
+        logging.error("Service creation error: %s", e.reason)
+
+    # Create custom object service monitor
+    try:
+        svc_monitor = customApi.create_namespaced_custom_object(
+            group="monitoring.coreos.com",
+            version="v1",
+            namespace=namespace,
+            plural="servicemonitors",  # CustomResourceDef of ServiceMonitor
+            body=svc_monitor_yaml,
+        )
+        print(f"ServiceMonitor '{svc_monitor['metadata']['name']}' created.")
+    except client.rest.ApiException as e:
+        svc_monitor = svc_monitor_yaml
+        logging.error("ServiceMonitor creation error: %s", e.reason)
+
+    # Apply jmx config map for aggregation service
+    try:
+        jmx_cm = coreApi.create_namespaced_config_map(
+            namespace=namespace, body=jmx_yaml)
+        print(f"ConfigMap '{jmx_cm.metadata.name}' created.")
+    except client.rest.ApiException as e:
+        jmx_cm = jmx_yaml
+        logging.error("ConfigMap creation error: %s", e.reason)
+
+    # Create deployment
+    deploy_yaml['spec']['replicas'] = instances
+    app_container = next(filter(
+        lambda x: x['name'] == 'uc-application',
+        deploy_yaml['spec']['template']['spec']['containers']))
+    app_container['image'] = 'ghcr.io/cau-se/theodolite-uc' + uc_id \
+        + '-kstreams-app:latest'
+
+    # Set configurations environment parameters for SPE
+    for k, v in configurations.items():
+        # check if environment variable is already defined in yaml
+        env = next(filter(lambda x: x['name'] == k,
+                          app_container['env']), None)
+        if env is not None:
+            env['value'] = v  # replace value
+        else:
+            # create new environment pair
+            conf = {'name': k, 'value': v}
+            app_container['env'].append(conf)
+
+    # Set resources in Kubernetes
+    app_container['resources']['limits']['memory'] = memory_limit
+    app_container['resources']['limits']['cpu'] = cpu_limit
+
+    # Deploy application
+    try:
+        app_deploy = appsApi.create_namespaced_deployment(
+            namespace=namespace,
+            body=deploy_yaml
+        )
+        print(f"Deployment '{app_deploy.metadata.name}' created.")
+    except client.rest.ApiException as e:
+        app_deploy = deploy_yaml
+        logging.error("Deployment creation error: %s", e.reason)
+
+    return svc, svc_monitor, jmx_cm, app_deploy
+
+
+def wait_execution(execution_minutes):
+    """
+    Wait time while in execution.
+    :param int execution_minutes: The duration to wait for execution.
+    """
+    print('Wait while executing')
+
+    for i in range(execution_minutes):
+        time.sleep(60)
+        print(f'Executed: {i+1} minutes')
+    print('Execution finished')
+    return
+
+
+def run_evaluation(exp_id, uc_id, dim_value, instances, execution_minutes, prometheus_base_url, result_path):
+    """
+    Runs the evaluation function
+    :param string exp_id: ID of the experiment.
+    :param string uc_id: ID of the executed use case.
+    :param int dim_value: The dimension value used for execution.
+    :param int instances: The number of instances used for the execution.
+    :param int execution_minutes: How long the use case was executed.
+    """
+    print('Run evaluation function')
+    try:
+        lag_analysis.main(exp_id, f'uc{uc_id}', dim_value, instances,
+                          execution_minutes, prometheus_base_url,
+                          result_path)
+    except Exception as e:
+        err_msg = 'Evaluation function failed'
+        print(err_msg)
+        logging.exception(err_msg)
+        print('Benchmark execution continues')
+
+    return
+
+
+def delete_resource(obj, del_func):
+    """
+    Helper function to delete kubernetes resources.
+    First tries to delete with the kubernetes object.
+    Then it uses the dict representation of the yaml to delete the object.
+    :param obj: Either kubernetes resource object or yaml as a dict.
+    :param del_func: The function that needs to be executed for deletion.
+    """
+    try:
+        del_func(obj.metadata.name, namespace)
+    except Exception as e:
+        logging.debug(
+            'Error deleting resource with api object, try with dict.')
+        try:
+            del_func(obj['metadata']['name'], namespace)
+        except Exception as e:
+            logging.error("Error deleting resource")
+            logging.error(e)
+            return
+    print('Resource deleted')
+
+
+def stop_applications(wg, app_svc, app_svc_monitor, app_jmx, app_deploy):
+    """Stops the applied applications and delete resources.
+    :param wg: The workload generator deployment.
+    :param app_svc: The application service.
+    :param app_svc_monitor: The application service monitor.
+    :param app_jmx: The application jmx config map.
+    :param app_deploy: The application deployment.
+    """
+    print('Stop use case application and workload generator')
+
+    print('Delete workload generator')
+    delete_resource(wg, appsApi.delete_namespaced_deployment)
+
+    print('Delete app service')
+    delete_resource(app_svc, coreApi.delete_namespaced_service)
+
+    print('Delete service monitor')
+    try:
+        customApi.delete_namespaced_custom_object(
+            group="monitoring.coreos.com",
+            version="v1",
+            namespace=namespace,
+            plural="servicemonitors",
+            name=app_svc_monitor['metadata']['name'])
+        print('Resource deleted')
+    except Exception as e:
+        print('Error deleting service monitor')
+
+    print('Delete jmx config map')
+    delete_resource(app_jmx, coreApi.delete_namespaced_config_map)
+
+    print('Delete uc application')
+    delete_resource(app_deploy, appsApi.delete_namespaced_deployment)
+
+    print('Check all pods deleted.')
+    while True:
+        # Wait a bit for the deletion
+        time.sleep(2)
+
+        # Count how many pods still need to be deleted
+        no_load = len(coreApi.list_namespaced_pod(
+            namespace, label_selector='app=titan-ccp-load-generator').items)
+        no_uc = len(coreApi.list_namespaced_pod(
+            namespace, label_selector='app=titan-ccp-aggregation').items)
+
+        # Check if all pods deleted
+        if no_load <= 0 and no_uc <= 0:
+            print('All pods deleted.')
+            break
+
+        print(f'#{no_load} load generator and #{no_uc} uc pods need to be deleted')
+    return
+
+
+def delete_topics(topics):
+    """Delete topics from Kafka.
+    :param topics: List of topics to delete.
+    """
+    print('Delete topics from Kafka')
+
+    topics_delete = 'theodolite-.*|' + '|'.join([ti[0] for ti in topics])
+
+    num_topics_command = [
+        '/bin/sh',
+        '-c',
+        f'kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list \
+        | sed -n -E "/^({topics_delete})\
+        ( - marked for deletion)?$/p" | wc -l'
+    ]
+
+    topics_deletion_command = [
+        '/bin/sh',
+        '-c',
+        f'kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete \
+        --topic "{topics_delete}"'
+    ]
+
+    # Wait until the topics are deleted
+    while True:
+        # topic deletion, sometimes a second deletion seems to be required
+        resp = stream(coreApi.connect_get_namespaced_pod_exec,
+                      "kafka-client",
+                      namespace,
+                      command=topics_deletion_command,
+                      stderr=True, stdin=False,
+                      stdout=True, tty=False)
+        print(resp)
+
+        print('Wait for topic deletion')
+        time.sleep(2)
+        resp = stream(coreApi.connect_get_namespaced_pod_exec,
+                      "kafka-client",
+                      namespace,
+                      command=num_topics_command,
+                      stderr=True, stdin=False,
+                      stdout=True, tty=False)
+        if resp == '0':
+            print('Topics deleted')
+            break
+    return
+
+
+def reset_zookeeper():
+    """Delete ZooKeeper configurations used for workload generation.
+    """
+    print('Delete ZooKeeper configurations used for workload generation')
+
+    delete_zoo_data_command = [
+        '/bin/sh',
+        '-c',
+        'zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall '
+        + '/workload-generation'
+    ]
+
+    check_zoo_data_command = [
+        '/bin/sh',
+        '-c',
+        'zookeeper-shell my-confluent-cp-zookeeper:2181 get '
+        + '/workload-generation'
+    ]
+
+    # Wait for configuration deletion
+    while True:
+        # Delete Zookeeper configuration data
+        resp = stream(coreApi.connect_get_namespaced_pod_exec,
+                      "zookeeper-client",
+                      namespace,
+                      command=delete_zoo_data_command,
+                      stderr=True, stdin=False,
+                      stdout=True, tty=False)
+        logging.debug(resp)
+
+        # Check data is deleted
+        client = stream(coreApi.connect_get_namespaced_pod_exec,
+                        "zookeeper-client",
+                        namespace,
+                        command=check_zoo_data_command,
+                        stderr=True, stdin=False,
+                        stdout=True, tty=False,
+                        _preload_content=False)  # Get client for returncode
+        client.run_forever(timeout=60)  # Start the client
+
+        if client.returncode == 1:  # Means data not available anymore
+            print('ZooKeeper reset was successful.')
+            break
+        else:
+            print('ZooKeeper reset was not successful. Retrying in 5s.')
+            time.sleep(5)
+    return
+
+
+def stop_lag_exporter():
+    """
+    Stop the lag exporter in order to reset it and allow smooth execution for
+    next use cases.
+    """
+    print('Stop the lag exporter')
+
+    try:
+        # Get lag exporter
+        pod_list = coreApi.list_namespaced_pod(
+            namespace=namespace, label_selector='app.kubernetes.io/name=kafka-lag-exporter')
+        lag_exporter_pod = pod_list.items[0].metadata.name
+
+        # Delete lag exporter pod
+        coreApi.delete_namespaced_pod(
+            name=lag_exporter_pod, namespace=namespace)
+        print('Deleted lag exporter pod: ' + lag_exporter_pod)
+    except client.rest.ApiException as e:
+        logging.error('Exception while stopping lag exporter')
+        logging.error(e)
+
+    return
+
+
+def reset_cluster(wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics):
+    """
+    Stop the applications, delete topics, reset zookeeper and stop lag exporter.
+    """
+    print('Reset cluster')
+    stop_applications(wg, app_svc, app_svc_monitor, app_jmx, app_deploy)
+    print('---------------------')
+    delete_topics(topics)
+    print('---------------------')
+    reset_zookeeper()
+    print('---------------------')
+    stop_lag_exporter()
+
+
+def main(exp_id, uc_id, dim_value, instances, partitions, cpu_limit, memory_limit, execution_minutes, prometheus_base_url, reset, ns, result_path, configurations, reset_only=False):
+    """
+    Main method to execute one time the benchmark for a given use case.
+    Start workload generator/application -> execute -> analyse -> stop all
+    :param string exp_id: The ID of the executed experiment.
+    :param string uc_id: Use case to execute.
+    :param int dim_value: Dimension value for the load generator.
+    :param int instances: Number of instances for the application.
+    :param int partitions: Number of partitions the Kafka topics should have.
+    :param string cpu_limit: Max CPU utilization for the application.
+    :param string memory_limit: Max memory utilization for the application.
+    :param int execution_minutes: How long to execute the benchmark.
+    :param string prometheus_base_url: Base URL of the Prometheus instance.
+    :param boolean reset: Flag for reset of cluster before execution.
+    :param string ns: The Kubernetes namespace to deploy to.
+    :param string result_path: Directory path for the result files.
+    :param dict configurations: Key value pairs for setting env variables of the UC.
+    :param boolean reset_only: Flag to only reset the application.
+    """
+    global namespace
+    namespace = ns
+    wg, app_svc, app_svc_monitor, app_jmx, app_deploy = load_yaml_files()
+    print('---------------------')
+
+    initialize_kubernetes_api()
+    print('---------------------')
+
+    topics = [('input', partitions),
+              ('output', partitions),
+              ('aggregation-feedback', partitions),
+              ('configuration', 1)]
+
+    # Check for reset options
+    if reset_only:
+        # Only reset the cluster and then end the program
+        reset_cluster(wg, app_svc, app_svc_monitor,
+                      app_jmx, app_deploy, topics)
+        sys.exit()
+    if reset:
+        # Reset cluster before execution
+        print('Reset cluster before execution')
+        reset_cluster(wg, app_svc, app_svc_monitor,
+                      app_jmx, app_deploy, topics)
+        print('---------------------')
+
+    # Register the reset operation so that it is executed if the program aborts
+    atexit.register(reset_cluster, wg, app_svc,
+                    app_svc_monitor, app_jmx, app_deploy, topics)
+
+    create_topics(topics)
+    print('---------------------')
+
+    wg = start_workload_generator(wg, dim_value, uc_id)
+    print('---------------------')
+
+    app_svc, app_svc_monitor, app_jmx, app_deploy = start_application(
+        app_svc,
+        app_svc_monitor,
+        app_jmx,
+        app_deploy,
+        instances,
+        uc_id,
+        memory_limit,
+        cpu_limit,
+        configurations)
+    print('---------------------')
+
+    wait_execution(execution_minutes)
+    print('---------------------')
+
+    run_evaluation(exp_id, uc_id, dim_value, instances,
+                   execution_minutes, prometheus_base_url, result_path)
+    print('---------------------')
+
+    # Regular cluster reset; the abort handler is no longer needed
+    reset_cluster(wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics)
+    atexit.unregister(reset_cluster)
+
+
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.INFO)
+    args = load_variables()
+    print('---------------------')
+    main(args.exp_id, args.uc, args.load, args.instances, args.partitions,
+         args.cpu_limit, args.memory_limit, args.duration, args.prometheus,
+         args.reset, args.namespace, args.path, args.configurations,
+         args.reset_only)
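
Since all parameters are plain arguments to `main`, a single benchmark run can also be triggered programmatically rather than via the CLI. A sketch with hypothetical values:

```python
import run_uc

run_uc.main(
    exp_id='0', uc_id='1', dim_value=100000, instances=1,
    partitions=40, cpu_limit='1000m', memory_limit='4Gi',
    execution_minutes=5, prometheus_base_url='http://localhost:9090',
    reset=False, ns='default', result_path='results',
    configurations={'COMMIT_INTERVAL_MS': '100'})
```
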
diff --git a/execution/run_uc1.sh b/execution/run_uc1.sh
index e6a3eb05ed7cca167ccbc9ae8c3d5cbc9803e000..02c46d8832fc800c57453570b14a6bf02681326a 100755
--- a/execution/run_uc1.sh
+++ b/execution/run_uc1.sh
@@ -29,38 +29,59 @@ NUM_SENSORS=$DIM_VALUE
 WL_MAX_RECORDS=150000
 WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
 
-WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml)
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
+cat <<EOF >uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: $WL_INSTANCES
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "$NUM_SENSORS"
+        - name: INSTANCES
+          value: "$WL_INSTANCES"
+EOF
+kubectl apply -k uc-workload-generator/overlay/uc1-workload-generator
 
 # Start application
 REPLICAS=$INSTANCES
-# When not using `sed` anymore, use `kubectl apply -f uc1-application`
-kubectl apply -f uc1-application/aggregation-service.yaml
-kubectl apply -f uc1-application/jmx-configmap.yaml
-kubectl apply -f uc1-application/service-monitor.yaml
-#kubectl apply -f uc1-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc1-application/aggregation-deployment.yaml)
-echo "$APPLICATION_YAML" | kubectl apply -f -
-kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+cat <<EOF >uc-application/overlay/uc1-application/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: $REPLICAS
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+        resources:
+          limits:
+            memory: $MEMORY_LIMIT
+            cpu: $CPU_LIMIT
+EOF
+kubectl apply -k uc-application/overlay/uc1-application
 
 # Execute for certain time
-sleep ${EXECUTION_MINUTES}m
+sleep $(($EXECUTION_MINUTES * 60))
 
 # Run eval script
 source ../.venv/bin/activate
 python lag_analysis.py $EXP_ID uc1 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
 deactivate
 
-# Stop wl and app
-#kubectl delete -f uc1-workload-generator/deployment.yaml
-#sed "s/{{INSTANCES}}/1/g" uc1-workload-generator/deployment.yaml | kubectl delete -f -
-#sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml | kubectl delete -f -
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
-kubectl delete -f uc1-application/aggregation-service.yaml
-kubectl delete -f uc1-application/jmx-configmap.yaml
-kubectl delete -f uc1-application/service-monitor.yaml
-#kubectl delete -f uc1-application/aggregation-deployment.yaml
-echo "$APPLICATION_YAML" | kubectl delete -f -
+# Stop workload generator and app
+kubectl delete -k uc-workload-generator/overlay/uc1-workload-generator
+kubectl delete -k uc-application/overlay/uc1-application
 
 
 # Delete topics instead of Kafka
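
The scripts now write a kustomize patch on the fly and apply the whole overlay with `kubectl apply -k`, replacing the earlier `sed` templating. For illustration, the same two steps expressed in Python (a sketch; the replica and sensor values are hypothetical, the paths are the ones used above):

```python
import subprocess
import yaml

# Build the same patch the heredoc above writes
patch = {
    'apiVersion': 'apps/v1',
    'kind': 'Deployment',
    'metadata': {'name': 'titan-ccp-load-generator'},
    'spec': {
        'replicas': 2,
        'template': {'spec': {'containers': [{
            'name': 'workload-generator',
            'env': [
                {'name': 'NUM_SENSORS', 'value': '100000'},
                {'name': 'INSTANCES', 'value': '2'},
            ],
        }]}},
    },
}
overlay = 'uc-workload-generator/overlay/uc1-workload-generator'
with open(f'{overlay}/set_paramters.yaml', 'w') as f:
    yaml.dump(patch, f)
# Apply the overlay, exactly like the script does
subprocess.run(['kubectl', 'apply', '-k', overlay], check=True)
```
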
diff --git a/execution/run_uc2.sh b/execution/run_uc2.sh
index 76d76cd4dc45b3b5e26ea4033c7afd58268fd3fb..4544d3609ed807141455378b92ce3536ea2f92f6 100755
--- a/execution/run_uc2.sh
+++ b/execution/run_uc2.sh
@@ -30,36 +30,63 @@ WL_MAX_RECORDS=150000
 APPROX_NUM_SENSORS=$((4**NUM_NESTED_GROUPS))
 WL_INSTANCES=$(((APPROX_NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
 
-WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_NESTED_GROUPS}}/$NUM_NESTED_GROUPS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc2-workload-generator/deployment.yaml)
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
+cat <<EOF >uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: $WL_INSTANCES
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "4"
+        - name: HIERARCHY
+          value: "full"
+        - name: NUM_NESTED_GROUPS
+          value: "$NUM_NESTED_GROUPS"
+        - name: INSTANCES
+          value: "$WL_INSTANCES"
+EOF
+kubectl apply -k uc-workload-generator/overlay/uc2-workload-generator
 
 # Start application
 REPLICAS=$INSTANCES
-# When not using `sed` anymore, use `kubectl apply -f uc2-application`
-kubectl apply -f uc2-application/aggregation-service.yaml
-kubectl apply -f uc2-application/jmx-configmap.yaml
-kubectl apply -f uc2-application/service-monitor.yaml
-#kubectl apply -f uc2-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc2-application/aggregation-deployment.yaml)
-echo "$APPLICATION_YAML" | kubectl apply -f -
-kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+cat <<EOF >uc-application/overlay/uc2-application/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: $REPLICAS
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+        resources:
+          limits:
+            memory: $MEMORY_LIMIT
+            cpu: $CPU_LIMIT
+EOF
+kubectl apply -k uc-application/overlay/uc2-application
 
 # Execute for certain time
-sleep ${EXECUTION_MINUTES}m
+sleep $(($EXECUTION_MINUTES * 60))
 
 # Run eval script
 source ../.venv/bin/activate
 python lag_analysis.py $EXP_ID uc2 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
 deactivate
 
-# Stop wl and app
-#sed "s/{{INSTANCES}}/1/g" uc2-workload-generator/deployment.yaml | kubectl delete -f -
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
-kubectl delete -f uc2-application/aggregation-service.yaml
-kubectl delete -f uc2-application/jmx-configmap.yaml
-kubectl delete -f uc2-application/service-monitor.yaml
-#kubectl delete -f uc2-application/aggregation-deployment.yaml
-echo "$APPLICATION_YAML" | kubectl delete -f -
+# Stop workload generator and app
+kubectl delete -k uc-workload-generator/overlay/uc2-workload-generator
+kubectl delete -k uc-application/overlay/uc2-application
 
 
 # Delete topics instead of Kafka
diff --git a/execution/run_uc3.sh b/execution/run_uc3.sh
index 1e34aea99fdc7a927e1943a397f02e1bb56f6a74..4f2323f937f19d01a73482dea6aeaf5e922a0a3f 100755
--- a/execution/run_uc3.sh
+++ b/execution/run_uc3.sh
@@ -29,40 +29,61 @@ NUM_SENSORS=$DIM_VALUE
 WL_MAX_RECORDS=150000
 WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
 
-WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc3-workload-generator/deployment.yaml)
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
+cat <<EOF >uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: $WL_INSTANCES
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "$NUM_SENSORS"
+        - name: INSTANCES
+          value: "$WL_INSTANCES"
+EOF
+kubectl apply -k uc-workload-generator/overlay/uc3-workload-generator
+
 
 # Start application
 REPLICAS=$INSTANCES
-# When not using `sed` anymore, use `kubectl apply -f uc3-application`
-kubectl apply -f uc3-application/aggregation-service.yaml
-kubectl apply -f uc3-application/jmx-configmap.yaml
-kubectl apply -f uc3-application/service-monitor.yaml
-#kubectl apply -f uc3-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc3-application/aggregation-deployment.yaml)
-echo "$APPLICATION_YAML" | kubectl apply -f -
-kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+cat <<EOF >uc-application/overlay/uc3-application/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: $REPLICAS
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+        resources:
+          limits:
+            memory: $MEMORY_LIMIT
+            cpu: $CPU_LIMIT
+EOF
+kubectl apply -k uc-application/overlay/uc3-application
+kubectl scale deployment uc3-titan-ccp-aggregation --replicas=$REPLICAS
 
 # Execute for certain time
-sleep ${EXECUTION_MINUTES}m
+sleep $(($EXECUTION_MINUTES * 60))
 
 # Run eval script
 source ../.venv/bin/activate
 python lag_analysis.py $EXP_ID uc3 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
 deactivate
 
-# Stop wl and app
-#kubectl delete -f uc3-workload-generator/deployment.yaml
-#sed "s/{{INSTANCES}}/1/g" uc3-workload-generator/deployment.yaml | kubectl delete -f -
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
-kubectl delete -f uc3-application/aggregation-service.yaml
-kubectl delete -f uc3-application/jmx-configmap.yaml
-kubectl delete -f uc3-application/service-monitor.yaml
-#kubectl delete -f uc3-application/aggregation-deployment.yaml
-#sed "s/{{CPU_LIMIT}}/1000m/g; s/{{MEMORY_LIMIT}}/4Gi/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/100/g" uc3-application/aggregation-deployment.yaml | kubectl delete -f -
-echo "$APPLICATION_YAML" | kubectl delete -f -
-
-
+# Stop workload generator and app
+kubectl delete -k uc-workload-generator/overlay/uc3-workload-generator
+kubectl delete -k uc-application/overlay/uc3-application
 
 # Delete topics instead of Kafka
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
diff --git a/execution/run_uc4.sh b/execution/run_uc4.sh
index bfd3ed8e2b970b12c5835ba5bcd8ea2dace0d84b..08a38498839ef3c50a39c1ccfbd26914993ffbd3 100755
--- a/execution/run_uc4.sh
+++ b/execution/run_uc4.sh
@@ -29,39 +29,60 @@ NUM_SENSORS=$DIM_VALUE
 WL_MAX_RECORDS=150000
 WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
 
-WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc4-workload-generator/deployment.yaml)
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
+cat <<EOF >uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: $WL_INSTANCES
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "$NUM_SENSORS"
+        - name: INSTANCES
+          value: "$WL_INSTANCES"
+EOF
+kubectl apply -k uc-workload-generator/overlay/uc4-workload-generator
 
 # Start application
 REPLICAS=$INSTANCES
-#AGGREGATION_DURATION_DAYS=$DIM_VALUE
-# When not using `sed` anymore, use `kubectl apply -f uc4-application`
-kubectl apply -f uc4-application/aggregation-service.yaml
-kubectl apply -f uc4-application/jmx-configmap.yaml
-kubectl apply -f uc4-application/service-monitor.yaml
-#kubectl apply -f uc4-application/aggregation-deployment.yaml
-#sed "s/{{AGGREGATION_DURATION_DAYS}}/$AGGREGATION_DURATION_DAYS/g" uc4-application/aggregation-deployment.yaml | kubectl apply -f -
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc4-application/aggregation-deployment.yaml)
-echo "$APPLICATION_YAML" | kubectl apply -f -
-kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+cat <<EOF >uc-application/overlay/uc4-application/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: $REPLICAS
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+        resources:
+          limits:
+            memory: $MEMORY_LIMIT
+            cpu: $CPU_LIMIT
+EOF
+kubectl apply -k uc-application/overlay/uc4-application
+kubectl scale deployment uc4-titan-ccp-aggregation --replicas=$REPLICAS
 
 # Execute for certain time
-sleep ${EXECUTION_MINUTES}m
+sleep $(($EXECUTION_MINUTES * 60))
 
 # Run eval script
 source ../.venv/bin/activate
 python lag_analysis.py $EXP_ID uc4 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
 deactivate
 
-# Stop wl and app
-#sed "s/{{INSTANCES}}/1/g" uc4-workload-generator/deployment.yaml | kubectl delete -f -
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
-kubectl delete -f uc4-application/aggregation-service.yaml
-kubectl delete -f uc4-application/jmx-configmap.yaml
-kubectl delete -f uc4-application/service-monitor.yaml
-#kubectl delete -f uc4-application/aggregation-deployment.yaml
-echo "$APPLICATION_YAML" | kubectl delete -f -
-
+# Stop workload generator and app
+kubectl delete -k uc-workload-generator/overlay/uc4-workload-generator
+kubectl delete -k uc-application/overlay/uc4-application
 
 # Delete topics instead of Kafka
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
diff --git a/execution/strategies/__init__.py b/execution/strategies/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/execution/strategies/config.py b/execution/strategies/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4df97c18ae54c7c181ddf08264c013f9447350f
--- /dev/null
+++ b/execution/strategies/config.py
@@ -0,0 +1,23 @@
+from dataclasses import dataclass
+
+@dataclass
+class ExperimentConfig:
+    """ Wrapper for the configuration of an experiment. """
+    use_case: str
+    exp_id: int
+    dim_values: list
+    replicass: list
+    partitions: int
+    cpu_limit: str
+    memory_limit: str
+    execution_minutes: int
+    prometheus_base_url: str
+    reset: bool
+    namespace: str
+    result_path: str
+    configurations: dict
+    domain_restriction_strategy: object
+    search_strategy: object
+    threshold: int
+    subexperiment_executor: object
+    subexperiment_evaluator: object
diff --git a/execution/strategies/experiment_execution.py b/execution/strategies/experiment_execution.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2ee18f9b79a6e880dbcb69b47061cc5ecc6b9ba
--- /dev/null
+++ b/execution/strategies/experiment_execution.py
@@ -0,0 +1,6 @@
+class ExperimentExecutor:
+    def __init__(self, config):
+        self.config = config
+
+    def execute(self):
+        self.config.domain_restriction_strategy.execute(self.config)
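
`ExperimentExecutor` merely hands the configuration to the chosen domain-restriction strategy, which in turn drives the search strategy per load. A wiring sketch, run from the `execution/` directory, with stubbed subexperiment executor/evaluator objects (the real ones are separate modules; their `execute` interfaces are inferred here from how the strategies call them):

```python
from strategies.config import ExperimentConfig
from strategies.experiment_execution import ExperimentExecutor
import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
import strategies.strategies.search.binary_search_strategy as binary_search_strategy

class StubSubexperimentExecutor:
    def execute(self, subexperiment_config):
        print('would run', subexperiment_config)  # no cluster operations

class StubSubexperimentEvaluator:
    def execute(self, subexperiment_config, threshold):
        return True  # pretend every configuration can handle the load

config = ExperimentConfig(
    use_case='1', exp_id=0, dim_values=[100000, 200000], replicass=[1, 2, 4],
    partitions=40, cpu_limit='1000m', memory_limit='4Gi', execution_minutes=5,
    prometheus_base_url='http://localhost:9090', reset=False,
    namespace='default', result_path='results', configurations={},
    domain_restriction_strategy=lower_bound_strategy,
    search_strategy=binary_search_strategy, threshold=2000,
    subexperiment_executor=StubSubexperimentExecutor(),
    subexperiment_evaluator=StubSubexperimentEvaluator())
ExperimentExecutor(config).execute()
```
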
diff --git a/execution/strategies/strategies/config.py b/execution/strategies/strategies/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c31f8c97a4085931cdfa1fa017d4e5909e21915
--- /dev/null
+++ b/execution/strategies/strategies/config.py
@@ -0,0 +1,19 @@
+from dataclasses import dataclass
+
+@dataclass
+class SubexperimentConfig:
+    """ Wrapper for the configuration of a subexperiment """
+    use_case: str
+    exp_id: int
+    counter: int
+    dim_value: int
+    replicas: int
+    partitions: int
+    cpu_limit: str
+    memory_limit: str
+    execution_minutes: int
+    prometheus_base_url: str
+    reset: bool
+    namespace: str
+    result_path: str
+    configurations: dict
diff --git a/execution/strategies/strategies/domain_restriction/lower_bound_strategy.py b/execution/strategies/strategies/domain_restriction/lower_bound_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..b218731fc76d83347b4dbf10448f01615d378c0b
--- /dev/null
+++ b/execution/strategies/strategies/domain_restriction/lower_bound_strategy.py
@@ -0,0 +1,12 @@
+# The lower-bound strategy: start the search for each dimension value at the lower bound determined for the previous one
+def execute(config):
+    dim_value_index = 0
+    lower_bound_replicas_index = 0
+    subexperiment_counter = 0
+    while dim_value_index < len(config.dim_values) and lower_bound_replicas_index >= 0 and lower_bound_replicas_index < len(config.replicass):
+        lower_bound_replicas_index, subexperiment_counter = config.search_strategy.execute(
+            config=config,
+            dim_value_index=dim_value_index,
+            lower_replicas_bound_index=lower_bound_replicas_index,
+            subexperiment_counter=subexperiment_counter)
+        dim_value_index+=1
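+
+# Illustrative example: with replicass=[1, 2, 4, 8], if the minimum for the
+# previous dimension value was found at index 2 (4 replicas), the search for
+# the next dimension value starts at index 2, since fewer replicas are
+# assumed to be insufficient for higher loads as well.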
\ No newline at end of file
diff --git a/execution/strategies/strategies/domain_restriction/no_lower_bound_strategy.py b/execution/strategies/strategies/domain_restriction/no_lower_bound_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5dea56118460b0dfdc6b1c36ce2587b6752512b
--- /dev/null
+++ b/execution/strategies/strategies/domain_restriction/no_lower_bound_strategy.py
@@ -0,0 +1,11 @@
+# The strategy without domain restriction: the domain contains all numbers of instances for every dimension value
+def execute(config):
+    dim_value_index = 0
+    subexperiment_counter = 0
+    while dim_value_index < len(config.dim_values):
+        _, subexperiment_counter = config.search_strategy.execute(
+            config=config,
+            dim_value_index=dim_value_index,
+            lower_replicas_bound_index=0,
+            subexperiment_counter=subexperiment_counter)
+        dim_value_index+=1
\ No newline at end of file
diff --git a/execution/strategies/strategies/search/binary_search_strategy.py b/execution/strategies/strategies/search/binary_search_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..46748cbda250597b3a7644522126268be4599293
--- /dev/null
+++ b/execution/strategies/strategies/search/binary_search_strategy.py
@@ -0,0 +1,50 @@
+# The binary search strategy
+from strategies.strategies.config import SubexperimentConfig
+
+def binary_search(config, dim_value, lower, upper, subexperiment_counter):
+    if lower == upper:
+        print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[lower]}")
+        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[lower], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
+        config.subexperiment_executor.execute(subexperiment_config)
+        success = config.subexperiment_evaluator.execute(subexperiment_config,
+                                                         config.threshold)
+        if success: # successful, the upper neighbor is assumed to have been successful as well
+            return (lower, subexperiment_counter+1)
+        else: # not successful
+            return (lower+1, subexperiment_counter)
+    elif lower+1==upper:
+        print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[lower]}")
+        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[lower], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
+        config.subexperiment_executor.execute(subexperiment_config)
+        success = config.subexperiment_evaluator.execute(subexperiment_config,
+                                                         config.threshold)
+        if success: # minimal instances found
+            return (lower, subexperiment_counter)
+        else: # not successful, check if lower+1 instances are sufficient
+            print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[upper]}")
+            subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[upper], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
+            config.subexperiment_executor.execute(subexperiment_config)
+            success = config.subexperiment_evaluator.execute(subexperiment_config,
+                                                             config.threshold)
+            if success: # minimal instances found
+                return (upper, subexperiment_counter)
+            else:
+                return (upper+1, subexperiment_counter)
+    else:
+        # test mid
+        mid=(upper+lower)//2
+        print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[mid]}")
+        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[mid], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
+        config.subexperiment_executor.execute(subexperiment_config)
+        success = config.subexperiment_evaluator.execute(subexperiment_config,
+                                                         config.threshold)
+        if success: # success -> search in (lower, mid-1)
+            return binary_search(config, dim_value, lower, mid-1, subexperiment_counter+1)
+        else: # not successful -> search in (mid+1, upper)
+            return binary_search(config, dim_value, mid+1, upper, subexperiment_counter+1)
+
+def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_counter):
+    upper = len(config.replicass)-1
+    dim_value=config.dim_values[dim_value_index]
+    return binary_search(config, dim_value, lower_replicas_bound_index, upper, subexperiment_counter)
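+
+# Illustrative trace: with replicass=[1, 2, 4, 8, 16] and the true minimum at
+# 8 (index 3), binary_search tests mid=2 (4 replicas, fails), recurses into
+# (3, 4), tests index 3 (succeeds) and returns it as the minimal index.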
diff --git a/execution/strategies/strategies/search/check_all_strategy.py b/execution/strategies/strategies/search/check_all_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..0861945113b829fa79317d8a1a6312b4d6e4f71d
--- /dev/null
+++ b/execution/strategies/strategies/search/check_all_strategy.py
@@ -0,0 +1,31 @@
+# The check-all strategy: execute a subexperiment for every replica count, starting from the lower bound
+from strategies.strategies.config import SubexperimentConfig
+
+
+def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_counter):
+    new_lower_replicas_bound_index = lower_replicas_bound_index
+    new_lower_replicas_bound_found = False
+    subexperiments_total = len(config.dim_values) * len(config.replicass)
+    while lower_replicas_bound_index < len(config.replicass):
+        subexperiment_counter += 1
+        dim_value = config.dim_values[dim_value_index]
+        replicas = config.replicass[lower_replicas_bound_index]
+        print(
+            f"Run subexperiment {subexperiment_counter} of at most {subexperiments_total} with dimension value {dim_value} and {replicas} replicas.")
+
+        subexperiment_config = SubexperimentConfig(
+            config.use_case, config.exp_id, subexperiment_counter, dim_value,
+            replicas, config.partitions, config.cpu_limit, config.memory_limit,
+            config.execution_minutes, config.prometheus_base_url, config.reset,
+            config.namespace, config.result_path, config.configurations)
+
+        config.subexperiment_executor.execute(subexperiment_config)
+
+        success = config.subexperiment_evaluator.execute(subexperiment_config,
+                                                         config.threshold)
+        if success and not new_lower_replicas_bound_found:
+            new_lower_replicas_bound_found = True
+            new_lower_replicas_bound_index = lower_replicas_bound_index
+        lower_replicas_bound_index += 1
+    return (new_lower_replicas_bound_index, subexperiment_counter)
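+
+# Note: unlike the linear search, this strategy keeps executing after the
+# first success, so every replica count from the lower bound upwards is
+# measured for each dimension value.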
diff --git a/execution/strategies/strategies/search/linear_search_strategy.py b/execution/strategies/strategies/search/linear_search_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e777303742e54cf2a11a1bde60e95b8aa85489d
--- /dev/null
+++ b/execution/strategies/strategies/search/linear_search_strategy.py
@@ -0,0 +1,23 @@
+# The linear-search strategy
+
+from strategies.strategies.config import SubexperimentConfig
+
+def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_counter):
+    subexperiments_total=len(config.dim_values)+len(config.replicass)-1
+    dim_value=config.dim_values[dim_value_index]
+    while lower_replicas_bound_index < len(config.replicass):
+        subexperiment_counter+=1
+        replicas=config.replicass[lower_replicas_bound_index]
+        print(f"Run subexperiment {subexperiment_counter} from at most {subexperiments_total} with dimension value {dim_value} and {replicas} replicas.")
+
+        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, replicas, config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
+
+        config.subexperiment_executor.execute(subexperiment_config)
+        success = config.subexperiment_evaluator.execute(subexperiment_config,
+                                                         config.threshold)
+        if success:
+            return (lower_replicas_bound_index, subexperiment_counter)
+        else:
+            lower_replicas_bound_index+=1
+    return (lower_replicas_bound_index, subexperiment_counter)
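+
+# Since the lower bound index only ever moves to the right, at most
+# len(dim_values) + len(replicass) - 1 subexperiments are executed, e.g.
+# 7 + 7 - 1 = 13 for the 7x7 matrices in strategies/tests.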
diff --git a/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py b/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..30188de837746b76113ec635ca77fadc3a91cb92
--- /dev/null
+++ b/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
@@ -0,0 +1,29 @@
+import lib.trend_slope_computer as trend_slope_computer
+import logging
+import os
+
+WARMUP_SEC = 60
+
+def execute(config, threshold):
+    """
+    Check the trend slope of the totallag of the subexperiment if it comes below
+    the threshold.
+
+    :param config: Configuration of the subexperiment.
+    :param threshold: The threshold the trendslope need to come below.
+    """
+    cwd = f'{os.getcwd()}/{config.result_path}'
+    file = f"exp{config.exp_id}_uc{config.use_case}_{config.dim_value}_{config.replicas}_totallag.csv"
+
+    try:
+        trend_slope = trend_slope_computer.compute(cwd, file, WARMUP_SEC)
+    except Exception:
+        err_msg = 'Computing trend slope failed'
+        print(err_msg)
+        logging.exception(err_msg)
+        print('Marking this subexperiment as not successful and continuing the benchmark')
+        return False
+
+    print(f"Trend Slope: {trend_slope}")
+
+    return trend_slope < threshold
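+
+# The evaluated CSV file is produced by the subexperiment run, e.g.
+# exp0_uc1_100000_1_totallag.csv for exp_id=0, use_case=1, dim_value=100000
+# and replicas=1 (illustrative values).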
diff --git a/execution/strategies/subexperiment_execution/subexperiment_executor.py b/execution/strategies/subexperiment_execution/subexperiment_executor.py
new file mode 100644
index 0000000000000000000000000000000000000000..6931dacfc72081cbe112c4d6d1003703ba42c526
--- /dev/null
+++ b/execution/strategies/subexperiment_execution/subexperiment_executor.py
@@ -0,0 +1,20 @@
+# Wrapper that makes the execution method of a subexperiment interchangeable.
+
+import run_uc
+
+def execute(subexperiment_config):
+    run_uc.main(
+        exp_id=subexperiment_config.exp_id,
+        uc_id=subexperiment_config.use_case,
+        dim_value=int(subexperiment_config.dim_value),
+        instances=int(subexperiment_config.replicas),
+        partitions=subexperiment_config.partitions,
+        cpu_limit=subexperiment_config.cpu_limit,
+        memory_limit=subexperiment_config.memory_limit,
+        execution_minutes=int(subexperiment_config.execution_minutes),
+        prometheus_base_url=subexperiment_config.prometheus_base_url,
+        reset=subexperiment_config.reset,
+        ns=subexperiment_config.namespace,
+        result_path=subexperiment_config.result_path,
+        configurations=subexperiment_config.configurations)
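+
+# Any module exposing execute(subexperiment_config) can be swapped in here,
+# e.g. the stub executors used by the tests in strategies/tests/.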
diff --git a/execution/strategies/tests/.gitignore b/execution/strategies/tests/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..1998c294f84ec0ff4b32396e4cd8e74e352672e6
--- /dev/null
+++ b/execution/strategies/tests/.gitignore
@@ -0,0 +1 @@
+.cache
\ No newline at end of file
diff --git a/execution/strategies/tests/__init__.py b/execution/strategies/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/execution/strategies/tests/test_domain_restriction_binary_search_strategy.py b/execution/strategies/tests/test_domain_restriction_binary_search_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..d93d4924cf09015c714604f2fc995e1db971e69d
--- /dev/null
+++ b/execution/strategies/tests/test_domain_restriction_binary_search_strategy.py
@@ -0,0 +1,105 @@
+import pprint
+
+from strategies.config import ExperimentConfig
+import strategies.strategies.search.binary_search_strategy as binary_search_strategy
+import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
+from strategies.experiment_execution import ExperimentExecutor
+import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
+
+class Object(object):
+    pass
+
+pp = pprint.PrettyPrinter(indent=4)
+
+dim_values = [0, 1, 2, 3, 4, 5, 6]
+replicass = [0, 1, 2, 3, 4, 5, 6]
+
+# True means the experiment was successful
+# the experiments are indexed row-wise (dimension value) and column-wise (number of replicas),
+# both ranging from 0 to 6; the first row thus starts with (0,0), the second row with (1,0), etc.
+successful = [
+       [ True , True , True , True , True , True , True  ],
+       [ False, False, True , True , True , True , True  ],
+       [ False, False, True , True , True , True , True  ],
+       [ False, False, False, True , True , True , True  ],
+       [ False, False, False, False, True , True , True  ],
+       [ False, False, False, False, False, False, True  ],
+       [ False, False, False, False, False, False, False ] 
+    ]
+
+expected_order = [
+        (0,3), # workload dim 0
+        (0,1), 
+        (0,0),
+        (1,3), # workload dim 1
+        (1,1),
+        (1,2),
+        (2,4), # workload dim 2
+        (2,2),
+        (3,4), # workload dim 3
+        (3,2),
+        (3,3),
+        (4,4), # workload dim 4
+        (4,3),
+        (5,5), # workload dim 5
+        (5,6),
+        (6,6) # workload dim 6
+    ]
+
+last_experiment = (0, 0)
+experiment_counter = -1
+subexperiment_executor = Object()
+
+def subexperiment_executor_executor(config):
+    global experiment_counter, last_experiment, pp
+    print("Simulate subexperiment with config:")
+    pp.pprint(config)
+    last_experiment = (config.dim_value, config.replicas)
+    experiment_counter += 1
+    print("Simulation complete")
+
+subexperiment_executor.execute = subexperiment_executor_executor
+
+
+# returns True if the experiment was successful
+
+subexperiment_evaluator = Object()
+
+def subexperiment_evaluator_execute(config, threshold):
+    print("Evaluating last experiment. Index was:")
+    global expected_order, experiment_counter, last_experiment, successful
+    pp.pprint(last_experiment)
+    print("Index was expected to be:")
+    pp.pprint(expected_order[experiment_counter])
+    assert expected_order[experiment_counter] == last_experiment
+    print("Index was as expected. Evaluation finished.")
+    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
+
+subexperiment_evaluator.execute = subexperiment_evaluator_execute
+
+def test_binary_search_strategy():
+    # declare parameters
+    uc="test-uc"
+    partitions=40
+    cpu_limit="1000m"
+    memory_limit="4Gi"
+    threshold=2000 # dummy value, ignored by the stub evaluator
+    execution_minutes=5
+
+    # execute
+    experiment_config = ExperimentConfig(
+        exp_id="0",
+        use_case=uc,
+        dim_values=dim_values,
+        replicass=replicass,
+        partitions=partitions,
+        cpu_limit=cpu_limit,
+        memory_limit=memory_limit,
+        # Dummy values for the remaining mandatory fields; the stub
+        # executor and evaluator do not use them.
+        prometheus_base_url="http://localhost:9090",
+        reset=False,
+        namespace="default",
+        result_path="results",
+        configurations={},
+        threshold=threshold,
+        execution_minutes=execution_minutes,
+        domain_restriction_strategy=lower_bound_strategy,
+        search_strategy=binary_search_strategy,
+        subexperiment_executor=subexperiment_executor,
+        subexperiment_evaluator=subexperiment_evaluator)
+    executor = ExperimentExecutor(experiment_config)
+    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_domain_restriction_check_all_strategy.py b/execution/strategies/tests/test_domain_restriction_check_all_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..c15daca6ebab3171f0995c048afe56c0185efe56
--- /dev/null
+++ b/execution/strategies/tests/test_domain_restriction_check_all_strategy.py
@@ -0,0 +1,120 @@
+import pprint
+
+from strategies.config import ExperimentConfig
+import strategies.strategies.search.check_all_strategy as check_all_strategy
+import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
+from strategies.experiment_execution import ExperimentExecutor
+import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
+
+class Object(object):
+    pass
+
+pp = pprint.PrettyPrinter(indent=4)
+
+dim_values = [0, 1, 2, 3, 4, 5, 6]
+replicass = [0, 1, 2, 3, 4, 5, 6]
+
+# True means the experiment was successful
+# the experiments are indexed row-wise (dimension value) and column-wise (number of replicas),
+# both ranging from 0 to 6; the first row thus starts with (0,0), the second row with (1,0), etc.
+successful = [
+       [ True , True , True , True , True , True , True  ],
+       [ False, False, True , True , True , True , True  ],
+       [ False, False, True , True , True , True , True  ],
+       [ False, False, False, True , True , True , True  ],
+       [ False, False, False, False, True , True , True  ],
+       [ False, False, False, False, False, False, True  ],
+       [ False, False, False, False, False, False, False ] 
+    ]
+
+# the expected order of executed experiments
+expected_order = [
+        (0,0), # workload dim 0
+        (0,1),
+        (0,2),
+        (0,3),
+        (0,4),
+        (0,5),
+        (0,6),
+        (1,0), # workload dim 1
+        (1,1),
+        (1,2),
+        (1,3),
+        (1,4),
+        (1,5),
+        (1,6),
+        (2,2), # workload dim 2
+        (2,3),
+        (2,4),
+        (2,5),
+        (2,6),
+        (3,2), # workload dim 3
+        (3,3),
+        (3,4),
+        (3,5),
+        (3,6),
+        (4,3), # workload dim 4
+        (4,4),
+        (4,5),
+        (4,6),
+        (5,4), # workload dim 5
+        (5,5),
+        (5,6),
+        (6,6) # workload dim 6
+    ]
+
+last_experiment = (0, 0)
+experiment_counter = -1
+subexperiment_executor = Object()
+
+def subexperiment_executor_executor(config):
+    global experiment_counter, last_experiment, pp
+    print("Simulate subexperiment with config:")
+    pp.pprint(config)
+    last_experiment = (config.dim_value, config.replicas)
+    experiment_counter += 1
+    print("Simulation complete")
+
+subexperiment_executor.execute = subexperiment_executor_executor
+
+
+# returns True if the experiment was successful
+
+subexperiment_evaluator = Object()
+
+def subexperiment_evaluator_execute(config, threshold):
+    print("Evaluating last experiment. Index was:")
+    global expected_order, experiment_counter, last_experiment, successful
+    pp.pprint(expected_order[experiment_counter])
+    assert expected_order[experiment_counter] == last_experiment
+    print("Index was as expected. Evaluation finished.")
+    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
+
+subexperiment_evaluator.execute = subexperiment_evaluator_execute
+
+def test_check_all_strategy():
+    # declare parameters
+    uc="test-uc"
+    partitions=40
+    cpu_limit="1000m"
+    memory_limit="4Gi"
+    threshold=2000 # dummy value, ignored by the stub evaluator
+    execution_minutes=5
+
+    # execute
+    experiment_config = ExperimentConfig(
+        exp_id="0",
+        use_case=uc,
+        dim_values=dim_values,
+        replicass=replicass,
+        partitions=partitions,
+        cpu_limit=cpu_limit,
+        memory_limit=memory_limit,
+        # Dummy values for the remaining mandatory fields; the stub
+        # executor and evaluator do not use them.
+        prometheus_base_url="http://localhost:9090",
+        reset=False,
+        namespace="default",
+        result_path="results",
+        configurations={},
+        threshold=threshold,
+        execution_minutes=execution_minutes,
+        domain_restriction_strategy=lower_bound_strategy,
+        search_strategy=check_all_strategy,
+        subexperiment_executor=subexperiment_executor,
+        subexperiment_evaluator=subexperiment_evaluator)
+    executor = ExperimentExecutor(experiment_config)
+    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_domain_restriction_linear_search_strategy.py b/execution/strategies/tests/test_domain_restriction_linear_search_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..86e2cd29d187cb83166102c503ee79e5e1424573
--- /dev/null
+++ b/execution/strategies/tests/test_domain_restriction_linear_search_strategy.py
@@ -0,0 +1,101 @@
+import pprint
+
+from strategies.config import ExperimentConfig
+import strategies.strategies.search.linear_search_strategy as linear_search_strategy
+import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
+from strategies.experiment_execution import ExperimentExecutor
+import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
+
+class Object(object):
+    pass
+
+pp = pprint.PrettyPrinter(indent=4)
+
+dim_values = [0, 1, 2, 3, 4, 5, 6]
+replicass = [0, 1, 2, 3, 4, 5, 6]
+
+# True means the experiment was successful
+# the experiments are indexed row-wise (dimension value) and column-wise (number of replicas),
+# both ranging from 0 to 6; the first row thus starts with (0,0), the second row with (1,0), etc.
+successful = [
+       [ True , True , True , True , True , True , True  ],
+       [ False, False, True , True , True , True , True  ],
+       [ False, False, True , True , True , True , True  ],
+       [ False, False, False, True , True , True , True  ],
+       [ False, False, False, False, True , True , True  ],
+       [ False, False, False, False, False, False, True  ],
+       [ False, False, False, False, False, False, False ] 
+    ]
+
+# the expected order of executed experiments
+expected_order = [
+        (0,0),
+        (1,0),
+        (1,1),
+        (1,2),
+        (2,2),
+        (3,2),
+        (3,3),
+        (4,3),
+        (4,4),
+        (5,4),
+        (5,5),
+        (5,6),
+        (6,6)
+    ]
+
+last_experiment = (0, 0)
+experiment_counter = -1
+subexperiment_executor = Object()
+
+def subexperiment_executor_executor(config):
+    global experiment_counter, last_experiment, pp
+    print("Simulate subexperiment with config:")
+    pp.pprint(config)
+    last_experiment = (config.dim_value, config.replicas)
+    experiment_counter += 1
+    print("Simulation complete")
+
+subexperiment_executor.execute = subexperiment_executor_executor
+
+
+# returns True if the experiment was successful
+
+subexperiment_evaluator = Object()
+
+def subexperiment_evaluator_execute(config, threshold):
+    print("Evaluating last experiment. Index was:")
+    global expected_order, experiment_counter, last_experiment, successful
+    pp.pprint(expected_order[experiment_counter])
+    assert expected_order[experiment_counter] == last_experiment
+    print("Index was as expected. Evaluation finished.")
+    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
+
+subexperiment_evaluator.execute = subexperiment_evaluator_execute
+
+def test_linear_search_strategy():
+    # declare parameters
+    uc="test-uc"
+    partitions=40
+    cpu_limit="1000m"
+    memory_limit="4Gi"
+    threshold=2000 # dummy value, ignored by the stub evaluator
+    execution_minutes=5
+
+    # execute
+    experiment_config = ExperimentConfig(
+        exp_id="0",
+        use_case=uc,
+        dim_values=dim_values,
+        replicass=replicass,
+        partitions=partitions,
+        cpu_limit=cpu_limit,
+        memory_limit=memory_limit,
+        # Dummy values for the remaining mandatory fields; the stub
+        # executor and evaluator do not use them.
+        prometheus_base_url="http://localhost:9090",
+        reset=False,
+        namespace="default",
+        result_path="results",
+        configurations={},
+        threshold=threshold,
+        execution_minutes=execution_minutes,
+        domain_restriction_strategy=lower_bound_strategy,
+        search_strategy=linear_search_strategy,
+        subexperiment_executor=subexperiment_executor,
+        subexperiment_evaluator=subexperiment_evaluator)
+    executor = ExperimentExecutor(experiment_config)
+    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_no_restriction_binary_search_strategy.py b/execution/strategies/tests/test_no_restriction_binary_search_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..4f5da89cc72edd792015763539c9af4677772a79
--- /dev/null
+++ b/execution/strategies/tests/test_no_restriction_binary_search_strategy.py
@@ -0,0 +1,110 @@
+import pprint
+
+from strategies.config import ExperimentConfig
+import strategies.strategies.search.binary_search_strategy as binary_search_strategy
+import strategies.strategies.domain_restriction.no_lower_bound_strategy as no_lower_bound_strategy
+from strategies.experiment_execution import ExperimentExecutor
+import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
+
+class Object(object):
+    pass
+
+pp = pprint.PrettyPrinter(indent=4)
+
+dim_values = [0, 1, 2, 3, 4, 5, 6]
+replicass = [0, 1, 2, 3, 4, 5, 6]
+
+# True means the experiment was successful
+# the experiments are indexed row-wise (dimension value) and column-wise (number of replicas),
+# both ranging from 0 to 6; the first row thus starts with (0,0), the second row with (1,0), etc.
+successful = [
+       [ True , True , True , True , True , True , True  ],
+       [ False, False, True , True , True , True , True  ],
+       [ False, False, True , True , True , True , True  ],
+       [ False, False, False, True , True , True , True  ],
+       [ False, False, False, False, True , True , True  ],
+       [ False, False, False, False, False, False, True  ],
+       [ False, False, False, False, False, False, False ] 
+    ]
+
+expected_order = [
+        (0,3), # workload dim 0
+        (0,1), 
+        (0,0),
+        (1,3), # workload dim 1
+        (1,1),
+        (1,2),
+        (2,3), # workload dim 2
+        (2,1),
+        (2,2),
+        (3,3), # workload dim 3
+        (3,1),
+        (3,2),
+        (4,3), # workload dim 4
+        (4,5),
+        (4,4),
+        (5,3), # workload dim 5
+        (5,5),
+        (5,6),
+        (6,3), # workload dim 6
+        (6,5),
+        (6,6)
+    ]
+
+last_experiment = (0, 0)
+experiment_counter = -1
+subexperiment_executor = Object()
+
+def subexperiment_executor_executor(config):
+    global experiment_counter, last_experiment, pp
+    print("Simulate subexperiment with config:")
+    pp.pprint(config)
+    last_experiment = (config.dim_value, config.replicas)
+    experiment_counter += 1
+    print("Simulation complete")
+
+subexperiment_executor.execute = subexperiment_executor_executor
+
+
+# returns True if the experiment was successful
+
+subexperiment_evaluator = Object()
+
+def subexperiment_evaluator_execute(config, threshold):
+    print("Evaluating last experiment. Index was:")
+    global expected_order, experiment_counter, last_experiment, successful
+    pp.pprint(last_experiment)
+    print("Index was expected to be:")
+    pp.pprint(expected_order[experiment_counter])
+    assert expected_order[experiment_counter] == last_experiment
+    print("Index was as expected. Evaluation finished.")
+    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
+
+subexperiment_evaluator.execute = subexperiment_evaluator_execute
+
+def test_binary_search_strategy():
+    # declare parameters
+    uc="test-uc"
+    partitions=40
+    cpu_limit="1000m"
+    memory_limit="4Gi"
+    threshold=2000 # dummy value, ignored by the stub evaluator
+    execution_minutes=5
+
+    # execute
+    experiment_config = ExperimentConfig(
+        exp_id="0",
+        use_case=uc,
+        dim_values=dim_values,
+        replicass=replicass,
+        partitions=partitions,
+        cpu_limit=cpu_limit,
+        memory_limit=memory_limit,
+        # Dummy values for the remaining mandatory fields; the stub
+        # executor and evaluator do not use them.
+        prometheus_base_url="http://localhost:9090",
+        reset=False,
+        namespace="default",
+        result_path="results",
+        configurations={},
+        threshold=threshold,
+        execution_minutes=execution_minutes,
+        domain_restriction_strategy=no_lower_bound_strategy,
+        search_strategy=binary_search_strategy,
+        subexperiment_executor=subexperiment_executor,
+        subexperiment_evaluator=subexperiment_evaluator)
+    executor = ExperimentExecutor(experiment_config)
+    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_no_restriction_check_all_strategy.py b/execution/strategies/tests/test_no_restriction_check_all_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..f173a3d168704cc7a499933984b6510ebda2751e
--- /dev/null
+++ b/execution/strategies/tests/test_no_restriction_check_all_strategy.py
@@ -0,0 +1,137 @@
+import pprint
+
+from strategies.config import ExperimentConfig
+import strategies.strategies.search.check_all_strategy as check_all_strategy
+import strategies.strategies.domain_restriction.no_lower_bound_strategy as no_lower_bound_strategy
+from strategies.experiment_execution import ExperimentExecutor
+import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
+
+class Object(object):
+    pass
+
+pp = pprint.PrettyPrinter(indent=4)
+
+dim_values = [0, 1, 2, 3, 4, 5, 6]
+replicass = [0, 1, 2, 3, 4, 5, 6]
+
+# True means the experiment was successful
+# the experiments are indexed row-wise (dimension value) and column-wise (number of replicas),
+# both ranging from 0 to 6; the first row thus starts with (0,0), the second row with (1,0), etc.
+successful = [
+       [ True , True , True , True , True , True , True  ],
+       [ False, False, True , True , True , True , True  ],
+       [ False, False, True , True , True , True , True  ],
+       [ False, False, False, True , True , True , True  ],
+       [ False, False, False, False, True , True , True  ],
+       [ False, False, False, False, False, False, True  ],
+       [ False, False, False, False, False, False, False ] 
+    ]
+
+# the expected order of executed experiments
+expected_order = [
+        (0,0), # workload dim 0
+        (0,1),
+        (0,2),
+        (0,3),
+        (0,4),
+        (0,5),
+        (0,6),
+        (1,0), # workload dim 1
+        (1,1),
+        (1,2),
+        (1,3),
+        (1,4),
+        (1,5),
+        (1,6),
+        (2,0), # workload dim 2
+        (2,1),
+        (2,2), 
+        (2,3),
+        (2,4),
+        (2,5),
+        (2,6),
+        (3,0), # workload dim 3
+        (3,1),
+        (3,2), 
+        (3,3),
+        (3,4),
+        (3,5),
+        (3,6),
+        (4,0), # workload dim 4
+        (4,1),
+        (4,2), 
+        (4,3),
+        (4,4),
+        (4,5),
+        (4,6),
+        (5,0), # workload dim 5
+        (5,1),
+        (5,2), 
+        (5,3),
+        (5,4),
+        (5,5),
+        (5,6),
+        (6,0), # workload dim 6
+        (6,1),
+        (6,2), 
+        (6,3),
+        (6,4),
+        (6,5),
+        (6,6),
+    ]
+
+last_experiment = (0, 0)
+experiment_counter = -1
+subexperiment_executor = Object()
+
+def subexperiment_executor_executor(config):
+    global experiment_counter, last_experiment, pp
+    print("Simulate subexperiment with config:")
+    pp.pprint(config)
+    last_experiment = (config.dim_value, config.replicas)
+    experiment_counter += 1
+    print("Simulation complete")
+
+subexperiment_executor.execute = subexperiment_executor_executor
+
+
+# returns True if the experiment was successful
+
+subexperiment_evaluator = Object()
+
+def subexperiment_evaluator_execute(config, threshold):
+    print("Evaluating last experiment. Index was:")
+    global expected_order, experiment_counter, last_experiment, successful
+    pp.pprint(expected_order[experiment_counter])
+    assert expected_order[experiment_counter] == last_experiment
+    print("Index was as expected. Evaluation finished.")
+    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
+
+subexperiment_evaluator.execute = subexperiment_evaluator_execute
+
+def test_check_all_strategy():
+    # declare parameters
+    uc="test-uc"
+    partitions=40
+    cpu_limit="1000m"
+    memory_limit="4Gi"
+    threshold=2000 # dummy value, ignored by the stub evaluator
+    execution_minutes=5
+
+    # execute
+    experiment_config = ExperimentConfig(
+        exp_id="0",
+        use_case=uc,
+        dim_values=dim_values,
+        replicass=replicass,
+        partitions=partitions,
+        cpu_limit=cpu_limit,
+        memory_limit=memory_limit,
+        # Dummy values for the remaining mandatory fields; the stub
+        # executor and evaluator do not use them.
+        prometheus_base_url="http://localhost:9090",
+        reset=False,
+        namespace="default",
+        result_path="results",
+        configurations={},
+        threshold=threshold,
+        execution_minutes=execution_minutes,
+        domain_restriction_strategy=no_lower_bound_strategy,
+        search_strategy=check_all_strategy,
+        subexperiment_executor=subexperiment_executor,
+        subexperiment_evaluator=subexperiment_evaluator)
+    executor = ExperimentExecutor(experiment_config)
+    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_no_restriction_linear_search_strategy.py b/execution/strategies/tests/test_no_restriction_linear_search_strategy.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e47c2e95b75ae682e82a02ad3d0a91c5a62f253
--- /dev/null
+++ b/execution/strategies/tests/test_no_restriction_linear_search_strategy.py
@@ -0,0 +1,118 @@
+import pprint
+
+from strategies.config import ExperimentConfig
+import strategies.strategies.search.linear_search_strategy as linear_search_strategy
+import strategies.strategies.domain_restriction.no_lower_bound_strategy as no_lower_bound_strategy
+from strategies.experiment_execution import ExperimentExecutor
+import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
+
+class Object(object):
+    pass
+
+pp = pprint.PrettyPrinter(indent=4)
+
+dim_values = [0, 1, 2, 3, 4, 5, 6]
+replicass = [0, 1, 2, 3, 4, 5, 6]
+
+# True means the experiment was successful
+# the experiments are indexed row-wise (dimension value) and column-wise (number of replicas),
+# both ranging from 0 to 6; the first row thus starts with (0,0), the second row with (1,0), etc.
+successful = [
+       [ True , True , True , True , True , True , True  ],
+       [ False, False, True , True , True , True , True  ],
+       [ False, False, True , True , True , True , True  ],
+       [ False, False, False, True , True , True , True  ],
+       [ False, False, False, False, True , True , True  ],
+       [ False, False, False, False, False, False, True  ],
+       [ False, False, False, False, False, False, False ] 
+    ]
+
+# the expected order of executed experiments
+expected_order = [
+        (0,0), # workload dim 0
+        (1,0), # workload dim 1
+        (1,1),
+        (1,2),
+        (2,0), # workload dim 2
+        (2,1),
+        (2,2),
+        (3,0), # workload dim 3
+        (3,1),
+        (3,2),
+        (3,3),
+        (4,0), # workload dim 4
+        (4,1),
+        (4,2),
+        (4,3),
+        (4,4),
+        (5,0), # workload dim 5
+        (5,1),
+        (5,2),
+        (5,3),
+        (5,4),
+        (5,5),
+        (5,6),
+        (6,0), # workload dim 6
+        (6,1),
+        (6,2),
+        (6,3),
+        (6,4),
+        (6,5),
+        (6,6)
+    ]
+
+last_experiment = (0, 0)
+experiment_counter = -1
+subexperiment_executor = Object()
+
+def subexperiment_executor_executor(config):
+    global experiment_counter, last_experiment, pp
+    print("Simulate subexperiment with config:")
+    pp.pprint(config)
+    last_experiment = (config.dim_value, config.replicas)
+    experiment_counter += 1
+    print("Simulation complete")
+
+subexperiment_executor.execute = subexperiment_executor_executor
+
+
+# returns True if the experiment was successful
+
+subexperiment_evaluator = Object()
+
+def subexperiment_evaluator_execute(config, threshold):
+    print("Evaluating last experiment. Index was:")
+    global expected_order, experiment_counter, last_experiment, successful
+    pp.pprint(expected_order[experiment_counter])
+    assert expected_order[experiment_counter] == last_experiment
+    print("Index was as expected. Evaluation finished.")
+    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
+
+subexperiment_evaluator.execute = subexperiment_evaluator_execute
+
+def test_linear_search_strategy():
+    # declare parameters
+    uc="test-uc"
+    partitions=40
+    cpu_limit="1000m"
+    memory_limit="4Gi"
+    threshold=2000 # dummy value, ignored by the stub evaluator
+    execution_minutes=5
+
+    # execute
+    experiment_config = ExperimentConfig(
+        exp_id="0",
+        use_case=uc,
+        dim_values=dim_values,
+        replicass=replicass,
+        partitions=partitions,
+        cpu_limit=cpu_limit,
+        memory_limit=memory_limit,
+        # Dummy values for the remaining mandatory fields; the stub
+        # executor and evaluator do not use them.
+        prometheus_base_url="http://localhost:9090",
+        reset=False,
+        namespace="default",
+        result_path="results",
+        configurations={},
+        threshold=threshold,
+        execution_minutes=execution_minutes,
+        domain_restriction_strategy=no_lower_bound_strategy,
+        search_strategy=linear_search_strategy,
+        subexperiment_executor=subexperiment_executor,
+        subexperiment_evaluator=subexperiment_evaluator)
+    executor = ExperimentExecutor(experiment_config)
+    executor.execute()
\ No newline at end of file
diff --git a/execution/theodolite.py b/execution/theodolite.py
new file mode 100755
index 0000000000000000000000000000000000000000..bd273c4405e2a406b5b5537e084957625c19aa96
--- /dev/null
+++ b/execution/theodolite.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+
+from lib.cli_parser import benchmark_parser
+import logging
+import os
+import run_uc
+import sys
+from strategies.config import ExperimentConfig
+import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
+import strategies.strategies.domain_restriction.no_lower_bound_strategy as no_lower_bound_strategy
+import strategies.strategies.search.check_all_strategy as check_all_strategy
+import strategies.strategies.search.linear_search_strategy as linear_search_strategy
+import strategies.strategies.search.binary_search_strategy as binary_search_strategy
+from strategies.experiment_execution import ExperimentExecutor
+import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
+import strategies.subexperiment_evaluation.subexperiment_evaluator as subexperiment_evaluator
+
+
+def load_variables():
+    """Load the CLI variables given at the command line"""
+    print('Load CLI variables')
+    parser = benchmark_parser("Run theodolite benchmarking")
+    args = parser.parse_args()
+    print(args)
+    if (args.uc is None or args.loads is None or args.instances_list is None) and not args.reset_only:
+        print('The options --uc, --loads and --instances are mandatory.')
+        print('At least one of them is not set!')
+        sys.exit(1)
+    return args
+
+
+def main(uc, loads, instances_list, partitions, cpu_limit, memory_limit,
+         duration, domain_restriction, search_strategy, threshold,
+         prometheus_base_url, reset, namespace, result_path, configurations):
+
+    print(
+        f"Domain restriction of search space activated: {domain_restriction}")
+    print(f"Chosen search strategy: {search_strategy}")
+
+    counter_path = f"{result_path}/exp_counter.txt"
+
+    if os.path.exists(counter_path):
+        with open(counter_path, mode="r") as read_stream:
+            exp_id = int(read_stream.read())
+    else:
+        exp_id = 0
+        # Create the directory if it does not exist
+        os.makedirs(result_path, exist_ok=True)
+
+    # Store metadata
+    separator = ","
+    lines = [
+        f'UC={uc}\n',
+        f'DIM_VALUES={separator.join(map(str, loads))}\n',
+        f'REPLICAS={separator.join(map(str, instances_list))}\n',
+        f'PARTITIONS={partitions}\n',
+        f'CPU_LIMIT={cpu_limit}\n',
+        f'MEMORY_LIMIT={memory_limit}\n',
+        f'EXECUTION_MINUTES={duration}\n',
+        f'DOMAIN_RESTRICTION={domain_restriction}\n',
+        f'SEARCH_STRATEGY={search_strategy}\n',
+        f'CONFIGURATIONS={configurations}'
+    ]
+    with open(f"{result_path}/exp{exp_id}_uc{uc}_meta.txt", "w") as stream:
+        stream.writelines(lines)
+
+    with open(counter_path, mode="w") as write_stream:
+        write_stream.write(str(exp_id + 1))
+
+    domain_restriction_strategy = None
+    search_strategy_method = None
+
+    # Select domain restriction strategy
+    if domain_restriction:
+        domain_restriction_strategy = lower_bound_strategy
+    else:
+        domain_restriction_strategy = no_lower_bound_strategy
+
+    # Select search strategy
+    if search_strategy == "linear-search":
+        print(
+            f"Going to execute at most {len(loads)+len(instances_list)-1} subexperiments in total..")
+        search_strategy_method = linear_search_strategy
+    elif search_strategy == "binary-search":
+        search_strategy_method = binary_search_strategy
+    else:
+        print(
+            f"Going to execute {len(loads)*len(instances_list)} subexperiments in total..")
+        search_strategy_method = check_all_strategy
+
+    experiment_config = ExperimentConfig(
+        use_case=uc,
+        exp_id=exp_id,
+        dim_values=loads,
+        replicass=instances_list,
+        partitions=partitions,
+        cpu_limit=cpu_limit,
+        memory_limit=memory_limit,
+        execution_minutes=duration,
+        prometheus_base_url=prometheus_base_url,
+        reset=reset,
+        namespace=namespace,
+        configurations=configurations,
+        result_path=result_path,
+        domain_restriction_strategy=domain_restriction_strategy,
+        search_strategy=search_strategy_method,
+        threshold=threshold,
+        subexperiment_executor=subexperiment_executor,
+        subexperiment_evaluator=subexperiment_evaluator)
+
+    executor = ExperimentExecutor(experiment_config)
+    executor.execute()
+
+
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.INFO)
+    args = load_variables()
+    if args.reset_only:
+        print('Only reset the cluster')
+        run_uc.main(None, None, None, None, None, None, None, None, None,
+                    None, args.namespace, None, None, reset_only=True)
+    else:
+        main(args.uc, args.loads, args.instances_list, args.partitions,
+             args.cpu_limit, args.memory_limit, args.duration,
+             args.domain_restriction, args.search_strategy,
+             args.threshold, args.prometheus, args.reset, args.namespace,
+             args.path, args.configurations)
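+
+
+# Example invocation (the --uc, --loads and --instances flag names are taken
+# from the check in load_variables(); the exact argument syntax is defined by
+# lib/cli_parser.benchmark_parser):
+#
+#   ./theodolite.py --uc 1 --loads 100000 200000 --instances 1 2 3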
diff --git a/execution/theodolite.sh b/execution/theodolite.sh
deleted file mode 100755
index 18a6b67a9c321cd1c0ecebca405169ec5b8ade46..0000000000000000000000000000000000000000
--- a/execution/theodolite.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/bash
-
-UC=$1
-IFS=', ' read -r -a DIM_VALUES <<< "$2"
-IFS=', ' read -r -a REPLICAS <<< "$3"
-PARTITIONS=${4:-40}
-CPU_LIMIT=${5:-1000m}
-MEMORY_LIMIT=${6:-4Gi}
-KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
-EXECUTION_MINUTES=${8:-5}
-
-# Get and increment counter
-EXP_ID=$(cat exp_counter.txt 2>/dev/null || echo "0")
-echo $((EXP_ID+1)) > exp_counter.txt
-
-# Store meta information
-IFS=$', '; echo \
-"UC=$UC
-DIM_VALUES=${DIM_VALUES[*]}
-REPLICAS=${REPLICAS[*]}
-PARTITIONS=$PARTITIONS
-CPU_LIMIT=$CPU_LIMIT
-MEMORY_LIMIT=$MEMORY_LIMIT
-KAFKA_STREAMS_COMMIT_INTERVAL_MS=$KAFKA_STREAMS_COMMIT_INTERVAL_MS
-EXECUTION_MINUTES=$EXECUTION_MINUTES
-" >> "exp${EXP_ID}_uc${UC}_meta.txt"
-
-SUBEXPERIMENTS=$((${#DIM_VALUES[@]} * ${#REPLICAS[@]}))
-SUBEXPERIMENT_COUNTER=0
-
-echo "Going to execute $SUBEXPERIMENTS subexperiments in total..."
-for DIM_VALUE in "${DIM_VALUES[@]}"
-do
-    for REPLICA in "${REPLICAS[@]}"
-    do
-        SUBEXPERIMENT_COUNTER=$((SUBEXPERIMENT_COUNTER+1))
-        echo "Run subexperiment $SUBEXPERIMENT_COUNTER/$SUBEXPERIMENTS with config: $DIM_VALUE $REPLICA"
-        ./run_uc$UC.sh $EXP_ID $DIM_VALUE $REPLICA $PARTITIONS $CPU_LIMIT $MEMORY_LIMIT $KAFKA_STREAMS_COMMIT_INTERVAL_MS $EXECUTION_MINUTES
-        sleep 10s
-    done
-done
diff --git a/execution/theodolite.yaml b/execution/theodolite.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..06d14a0f589b2ac7a16ebaaae4d1490b840ea57b
--- /dev/null
+++ b/execution/theodolite.yaml
@@ -0,0 +1,51 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: theodolite
+spec:
+  template:
+    spec:
+      volumes:
+      - name: theodolite-pv-storage
+        persistentVolumeClaim:
+          claimName: theodolite-pv-claim
+      containers:
+        - name: theodolite
+          image: ghcr.io/cau-se/theodolite:latest
+          # imagePullPolicy: Never # Use a locally built image instead of pulling it
+          env:
+            - name: UC # mandatory
+              value: "1"
+            - name: LOADS # mandatory
+              value: "100000, 200000"
+            - name: INSTANCES # mandatory
+              value: "1, 2, 3"
+            # - name: DURATION
+            #   value: "5"
+            # - name: PARTITIONS
+            #   value: "40"
+            # - name: DOMAIN_RESTRICTION
+            #   value: "True"
+            # - name: SEARCH_STRATEGY
+            #   value: "linear-search"
+            # - name: CPU_LIMIT
+            #   value: "1000m"
+            # - name: MEMORY_LIMIT
+            #   value: "4Gi"
+            - name: PROMETHEUS_BASE_URL
+              value: "http://prometheus-operated:9090"
+            # - name: NAMESPACE
+            #   value: "default"
+            # - name: CONFIGURATIONS
+            #   value: "COMMIT_INTERVAL_MS=100, NUM_STREAM_THREADS=1"
+            - name: RESULT_PATH
+              value: "results"
+            - name: PYTHONUNBUFFERED # Enable logs in Kubernetes
+              value: "1"
+          volumeMounts:
+            - mountPath: "/app/results"
+              name: theodolite-pv-storage
+      restartPolicy: Never
+      # Uncomment if RBAC is enabled and configured
+      # serviceAccountName: theodolite
+  backoffLimit: 4
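+# Start the benchmarking Job with:
+#   kubectl apply -f theodolite.yaml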
diff --git a/execution/uc1-application/aggregation-deployment.yaml b/execution/uc-application/base/aggregation-deployment.yaml
similarity index 85%
rename from execution/uc1-application/aggregation-deployment.yaml
rename to execution/uc-application/base/aggregation-deployment.yaml
index bcb0a955de0d5ce64fe6bdcba1e537468c833e5b..07732ca1dd1e6b2b06f098dfb10a53d38e8d5cae 100644
--- a/execution/uc1-application/aggregation-deployment.yaml
+++ b/execution/uc-application/base/aggregation-deployment.yaml
@@ -14,8 +14,8 @@ spec:
     spec:
       terminationGracePeriodSeconds: 0
       containers:
-      - name: uc1-application
-        image: "theodolite/theodolite-uc1-kstreams-app:latest"
+      - name: uc-application
+        image: uc-app:latest
         ports:
         - containerPort: 5555
           name: jmx
@@ -24,14 +24,14 @@ spec:
           value: "my-confluent-cp-kafka:9092"
         - name: SCHEMA_REGISTRY_URL
           value: "http://my-confluent-cp-schema-registry:8081"
-        - name: COMMIT_INTERVAL_MS
-          value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
         - name: JAVA_OPTS
           value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+        - name: COMMIT_INTERVAL_MS # Default for the applications; overridden by the use case overlays
+          value: "100"
         resources:
           limits:
-            memory: "{{MEMORY_LIMIT}}"
-            cpu: "{{CPU_LIMIT}}"
+            memory: 4Gi
+            cpu: 1000m
       - name: prometheus-jmx-exporter
         image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
         command:
diff --git a/execution/uc1-application/aggregation-service.yaml b/execution/uc-application/base/aggregation-service.yaml
similarity index 86%
rename from execution/uc1-application/aggregation-service.yaml
rename to execution/uc-application/base/aggregation-service.yaml
index 85432d04f225c30469f3232153ef6bd72bd02bdf..6317caf9fe624e42449b8f630d040a068709cda3 100644
--- a/execution/uc1-application/aggregation-service.yaml
+++ b/execution/uc-application/base/aggregation-service.yaml
@@ -1,14 +1,14 @@
 apiVersion: v1
 kind: Service
-metadata:  
+metadata:
   name: titan-ccp-aggregation
   labels:
     app: titan-ccp-aggregation
 spec:
   #type: NodePort
-  selector:    
+  selector:
     app: titan-ccp-aggregation
-  ports:  
+  ports:
   - name: http
     port: 80
     targetPort: 80
diff --git a/execution/uc1-application/jmx-configmap.yaml b/execution/uc-application/base/jmx-configmap.yaml
similarity index 100%
rename from execution/uc1-application/jmx-configmap.yaml
rename to execution/uc-application/base/jmx-configmap.yaml
diff --git a/execution/uc-application/base/kustomization.yaml b/execution/uc-application/base/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..24c89cfdafb17cdc91f65198b9faf3665bfc6822
--- /dev/null
+++ b/execution/uc-application/base/kustomization.yaml
@@ -0,0 +1,12 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+commonLabels:
+  app: titan-ccp-aggregation
+
+# All resources composing the use case application
+resources:
+  - aggregation-deployment.yaml
+  - aggregation-service.yaml
+  - service-monitor.yaml
+  - jmx-configmap.yaml
diff --git a/execution/uc1-application/service-monitor.yaml b/execution/uc-application/base/service-monitor.yaml
similarity index 100%
rename from execution/uc1-application/service-monitor.yaml
rename to execution/uc-application/base/service-monitor.yaml
diff --git a/execution/uc-application/overlay/uc1-application/kustomization.yaml b/execution/uc-application/overlay/uc1-application/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0d3820fe392e1d2224d78a8dd2415c4dce37c6e6
--- /dev/null
+++ b/execution/uc-application/overlay/uc1-application/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc1-
+
+images:
+  - name: uc-app
+    newName: theodolite/theodolite-uc1-kstreams-app
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the resource parameters
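+
+# Deploy this overlay directly with kubectl's built-in kustomize support, as
+# the run scripts do:
+#   kubectl apply -k uc-application/overlay/uc1-application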
diff --git a/execution/uc-application/overlay/uc1-application/set_paramters.yaml b/execution/uc-application/overlay/uc1-application/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cb85048128774ab421b89338d5b1ce23791acac8
--- /dev/null
+++ b/execution/uc-application/overlay/uc1-application/set_paramters.yaml
@@ -0,0 +1,17 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "100"
+        resources:
+          limits:
+            memory: 4Gi
+            cpu: 1000m
diff --git a/execution/uc-application/overlay/uc2-application/kustomization.yaml b/execution/uc-application/overlay/uc2-application/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cd32cabf70fdfa666a5703c97bc4e4fad7800ba7
--- /dev/null
+++ b/execution/uc-application/overlay/uc2-application/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc2-
+
+images:
+  - name: uc-app
+    newName: theodolite/theodolite-uc2-kstreams-app
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the resource parameters
diff --git a/execution/uc-application/overlay/uc2-application/set_paramters.yaml b/execution/uc-application/overlay/uc2-application/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cb85048128774ab421b89338d5b1ce23791acac8
--- /dev/null
+++ b/execution/uc-application/overlay/uc2-application/set_paramters.yaml
@@ -0,0 +1,17 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "100"
+        resources:
+          limits:
+            memory: 4Gi
+            cpu: 1000m
diff --git a/execution/uc-application/overlay/uc3-application/kustomization.yaml b/execution/uc-application/overlay/uc3-application/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5722cbca8cc79247063921a55252435804edefe6
--- /dev/null
+++ b/execution/uc-application/overlay/uc3-application/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc3-
+
+images:
+  - name: uc-app
+    newName: theodolite/theodolite-uc3-kstreams-app
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the resource parameters
diff --git a/execution/uc-application/overlay/uc3-application/set_paramters.yaml b/execution/uc-application/overlay/uc3-application/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cb85048128774ab421b89338d5b1ce23791acac8
--- /dev/null
+++ b/execution/uc-application/overlay/uc3-application/set_paramters.yaml
@@ -0,0 +1,17 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "100"
+        resources:
+          limits:
+            memory: 4Gi
+            cpu: 1000m
diff --git a/execution/uc-application/overlay/uc4-application/kustomization.yaml b/execution/uc-application/overlay/uc4-application/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b44a9bb643802735b740b74bdb47299fb413e5d3
--- /dev/null
+++ b/execution/uc-application/overlay/uc4-application/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc4-
+
+images:
+  - name: uc-app
+    newName: theodolite/theodolite-uc4-kstreams-app
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the resource parameters
diff --git a/execution/uc-application/overlay/uc4-application/set_paramters.yaml b/execution/uc-application/overlay/uc4-application/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cb85048128774ab421b89338d5b1ce23791acac8
--- /dev/null
+++ b/execution/uc-application/overlay/uc4-application/set_paramters.yaml
@@ -0,0 +1,17 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "100"
+        resources:
+          limits:
+            memory: 4Gi
+            cpu: 1000m
diff --git a/execution/uc-workload-generator/base/kustomization.yaml b/execution/uc-workload-generator/base/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2a2c3de74db5afb7c70b440651b8c0c47720b755
--- /dev/null
+++ b/execution/uc-workload-generator/base/kustomization.yaml
@@ -0,0 +1,5 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+  - workloadGenerator.yaml
diff --git a/execution/uc2-workload-generator/deployment.yaml b/execution/uc-workload-generator/base/workloadGenerator.yaml
similarity index 81%
rename from execution/uc2-workload-generator/deployment.yaml
rename to execution/uc-workload-generator/base/workloadGenerator.yaml
index a7bf66f5e47a6fadfcd294366a3cfdefeaca656a..794468b18dc74ca09872577b5b3c115605bd4620 100644
--- a/execution/uc2-workload-generator/deployment.yaml
+++ b/execution/uc-workload-generator/base/workloadGenerator.yaml
@@ -6,7 +6,7 @@ spec:
   selector:
     matchLabels:
       app: titan-ccp-load-generator
-  replicas: {{INSTANCES}}
+  replicas: 1
   template:
     metadata:
       labels:
@@ -15,8 +15,15 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: workload-generator
-        image: theodolite/theodolite-uc2-workload-generator:latest
+        image: workload-generator:latest
         env:
+        # Order needs to be preserved for run_uc.py
+        - name: NUM_SENSORS
+          value: "25000"
+        - name: INSTANCES
+          value: "1"
+        - name: NUM_NESTED_GROUPS
+          value: "5"
         - name: ZK_HOST
           value: "my-confluent-cp-zookeeper"
         - name: ZK_PORT
@@ -25,13 +32,7 @@ spec:
           value: "my-confluent-cp-kafka:9092"
         - name: SCHEMA_REGISTRY_URL
           value: "http://my-confluent-cp-schema-registry:8081"
-        - name: NUM_SENSORS
-          value: "4"
-        - name: NUM_NESTED_GROUPS
-          value: "{{NUM_NESTED_GROUPS}}"
         - name: POD_NAME
           valueFrom:
             fieldRef:
               fieldPath: metadata.name
-        - name: INSTANCES
-          value: "{{INSTANCES}}"
diff --git a/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..553b769a3bacd3356d6b5af5ba2e865acdd47a7c
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc1-
+
+images:
+  - name: workload-generator
+    newName: theodolite/theodolite-uc1-workload-generator
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the workload generator parameters
diff --git a/execution/uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b275607c27723b1e7e5e7e2b5c02942731bed809
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml
@@ -0,0 +1,15 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "25000"
+        - name: INSTANCES
+          value: "1"
diff --git a/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ff68743355d55459f2df988e8dd42bf0b3b6ae64
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc2-
+
+images:
+  - name: workload-generator
+    newName: theodolite/theodolite-uc2-workload-generator
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the workload generator parameters
diff --git a/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..187cb4717195537288e58035dcdda5f34fc9ceed
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml
@@ -0,0 +1,19 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "4"
+        - name: HIERARCHY
+          value: "full"
+        - name: NUM_NESTED_GROUPS
+          value: "5"
+        - name: INSTANCES
+          value: "1"
diff --git a/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a7022480fcfe401f3e4e4c3898c3d79930198d3e
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc3-
+
+images:
+  - name: workload-generator
+    newName: theodolite/theodolite-uc3-workload-generator
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the workload generator parameters
diff --git a/execution/uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b275607c27723b1e7e5e7e2b5c02942731bed809
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml
@@ -0,0 +1,15 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "25000"
+        - name: INSTANCES
+          value: "1"
diff --git a/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5efb0eb25a26371cdddfcc7969a2d10131dbb448
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml
@@ -0,0 +1,15 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+namePrefix: uc4-
+
+images:
+  - name: workload-generator
+    newName: theodolite/theodolite-uc4-workload-generator
+    newTag: latest
+
+bases:
+- ../../base
+
+patchesStrategicMerge:
+- set_paramters.yaml # Patch setting the workload generator parameters
diff --git a/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b275607c27723b1e7e5e7e2b5c02942731bed809
--- /dev/null
+++ b/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml
@@ -0,0 +1,15 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "25000"
+        - name: INSTANCES
+          value: "1"
diff --git a/execution/uc1-workload-generator/deployment.yaml b/execution/uc1-workload-generator/deployment.yaml
deleted file mode 100644
index e8326926e7bdb1b49be2d1c03f4a8e26ca77a2a6..0000000000000000000000000000000000000000
--- a/execution/uc1-workload-generator/deployment.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-load-generator
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-load-generator
-  replicas: {{INSTANCES}}
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-load-generator
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: workload-generator
-        image: theodolite/theodolite-uc1-workload-generator:latest
-        env:
-        - name: ZK_HOST
-          value: "my-confluent-cp-zookeeper"
-        - name: ZK_PORT
-          value: "2181"
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
-        - name: NUM_SENSORS
-          value: "{{NUM_SENSORS}}"
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: INSTANCES
-          value: "{{INSTANCES}}"
diff --git a/execution/uc2-application/aggregation-deployment.yaml b/execution/uc2-application/aggregation-deployment.yaml
deleted file mode 100644
index 3eca4749ad1decbf9b3fd1973fcad94febf355d8..0000000000000000000000000000000000000000
--- a/execution/uc2-application/aggregation-deployment.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-aggregation
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-aggregation
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-aggregation
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: uc2-application
-        image: "theodolite/theodolite-uc2-kstreams-app:latest"
-        ports:
-        - containerPort: 5555
-          name: jmx
-        env:
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
-        - name: COMMIT_INTERVAL_MS
-          value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
-        - name: JAVA_OPTS
-          value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
-        - name: LOG_LEVEL
-          value: "INFO"
-        resources:
-          limits:
-            memory: "{{MEMORY_LIMIT}}"
-            cpu: "{{CPU_LIMIT}}"
-      - name: prometheus-jmx-exporter
-        image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
-        command:
-          - java
-          - -XX:+UnlockExperimentalVMOptions
-          - -XX:+UseCGroupMemoryLimitForHeap
-          - -XX:MaxRAMFraction=1
-          - -XshowSettings:vm
-          - -jar
-          - jmx_prometheus_httpserver.jar
-          - "5556"
-          - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
-        ports:
-          - containerPort: 5556
-        volumeMounts:
-          - name: jmx-config
-            mountPath: /etc/jmx-aggregation
-      volumes:
-        - name: jmx-config
-          configMap:
-            name: aggregation-jmx-configmap
diff --git a/execution/uc2-application/aggregation-service.yaml b/execution/uc2-application/aggregation-service.yaml
deleted file mode 100644
index 85432d04f225c30469f3232153ef6bd72bd02bdf..0000000000000000000000000000000000000000
--- a/execution/uc2-application/aggregation-service.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:  
-  name: titan-ccp-aggregation
-  labels:
-    app: titan-ccp-aggregation
-spec:
-  #type: NodePort
-  selector:    
-    app: titan-ccp-aggregation
-  ports:  
-  - name: http
-    port: 80
-    targetPort: 80
-    protocol: TCP
-  - name: metrics
-    port: 5556
diff --git a/execution/uc2-application/jmx-configmap.yaml b/execution/uc2-application/jmx-configmap.yaml
deleted file mode 100644
index 78496a86b1242a89b9e844ead3e700fd0b9a9667..0000000000000000000000000000000000000000
--- a/execution/uc2-application/jmx-configmap.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: aggregation-jmx-configmap
-data:
-  jmx-kafka-prometheus.yml: |+
-    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
-    lowercaseOutputName: true
-    lowercaseOutputLabelNames: true
-    ssl: false
diff --git a/execution/uc2-application/service-monitor.yaml b/execution/uc2-application/service-monitor.yaml
deleted file mode 100644
index 4e7e758cacb5086305efa26292ddef2afc958096..0000000000000000000000000000000000000000
--- a/execution/uc2-application/service-monitor.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  labels:
-    app: titan-ccp-aggregation
-    appScope: titan-ccp
-  name: titan-ccp-aggregation
-spec:
-  selector:
-    matchLabels:
-        app: titan-ccp-aggregation
-  endpoints:
-    - port: metrics
-      interval: 10s
diff --git a/execution/uc3-application/aggregation-deployment.yaml b/execution/uc3-application/aggregation-deployment.yaml
deleted file mode 100644
index a535b5b6443e89564d4bb0cbe17593c60dc289dc..0000000000000000000000000000000000000000
--- a/execution/uc3-application/aggregation-deployment.yaml
+++ /dev/null
@@ -1,57 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-aggregation
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-aggregation
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-aggregation
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: uc3-application
-        image: "theodolite/theodolite-uc3-kstreams-app:latest"
-        ports:
-        - containerPort: 5555
-          name: jmx
-        env:
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
-        - name: KAFKA_WINDOW_DURATION_MINUTES
-          value: "1"
-        - name: COMMIT_INTERVAL_MS
-          value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
-        - name: JAVA_OPTS
-          value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
-        resources:
-          limits:
-            memory: "{{MEMORY_LIMIT}}"
-            cpu: "{{CPU_LIMIT}}"
-      - name: prometheus-jmx-exporter
-        image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
-        command:
-          - java
-          - -XX:+UnlockExperimentalVMOptions
-          - -XX:+UseCGroupMemoryLimitForHeap
-          - -XX:MaxRAMFraction=1
-          - -XshowSettings:vm
-          - -jar
-          - jmx_prometheus_httpserver.jar
-          - "5556"
-          - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
-        ports:
-          - containerPort: 5556
-        volumeMounts:
-          - name: jmx-config
-            mountPath: /etc/jmx-aggregation
-      volumes:
-        - name: jmx-config
-          configMap:
-            name: aggregation-jmx-configmap
diff --git a/execution/uc3-application/aggregation-service.yaml b/execution/uc3-application/aggregation-service.yaml
deleted file mode 100644
index 85432d04f225c30469f3232153ef6bd72bd02bdf..0000000000000000000000000000000000000000
--- a/execution/uc3-application/aggregation-service.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:  
-  name: titan-ccp-aggregation
-  labels:
-    app: titan-ccp-aggregation
-spec:
-  #type: NodePort
-  selector:    
-    app: titan-ccp-aggregation
-  ports:  
-  - name: http
-    port: 80
-    targetPort: 80
-    protocol: TCP
-  - name: metrics
-    port: 5556
diff --git a/execution/uc3-application/jmx-configmap.yaml b/execution/uc3-application/jmx-configmap.yaml
deleted file mode 100644
index 78496a86b1242a89b9e844ead3e700fd0b9a9667..0000000000000000000000000000000000000000
--- a/execution/uc3-application/jmx-configmap.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: aggregation-jmx-configmap
-data:
-  jmx-kafka-prometheus.yml: |+
-    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
-    lowercaseOutputName: true
-    lowercaseOutputLabelNames: true
-    ssl: false
diff --git a/execution/uc3-application/service-monitor.yaml b/execution/uc3-application/service-monitor.yaml
deleted file mode 100644
index 4e7e758cacb5086305efa26292ddef2afc958096..0000000000000000000000000000000000000000
--- a/execution/uc3-application/service-monitor.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  labels:
-    app: titan-ccp-aggregation
-    appScope: titan-ccp
-  name: titan-ccp-aggregation
-spec:
-  selector:
-    matchLabels:
-        app: titan-ccp-aggregation
-  endpoints:
-    - port: metrics
-      interval: 10s
diff --git a/execution/uc3-workload-generator/deployment.yaml b/execution/uc3-workload-generator/deployment.yaml
deleted file mode 100644
index d323fd089eeaa4542db5a645fb3b08885b8eff26..0000000000000000000000000000000000000000
--- a/execution/uc3-workload-generator/deployment.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-load-generator
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-load-generator
-  replicas: {{INSTANCES}}
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-load-generator
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: workload-generator
-        image: theodolite/theodolite-uc3-workload-generator:latest
-        env:
-        - name: ZK_HOST
-          value: "my-confluent-cp-zookeeper"
-        - name: ZK_PORT
-          value: "2181"
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
-        - name: NUM_SENSORS
-          value: "{{NUM_SENSORS}}"
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: INSTANCES
-          value: "{{INSTANCES}}"
diff --git a/execution/uc4-application/aggregation-deployment.yaml b/execution/uc4-application/aggregation-deployment.yaml
deleted file mode 100644
index 5f71737046e12b7f0116d59c4b55f0c0de39bbd2..0000000000000000000000000000000000000000
--- a/execution/uc4-application/aggregation-deployment.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-aggregation
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-aggregation
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-aggregation
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: uc4-application
-        image: "theodolite/theodolite-uc4-kstreams-app:latest"
-        ports:
-        - containerPort: 5555
-          name: jmx
-        env:
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
-        - name: AGGREGATION_DURATION_DAYS
-          value: "3" #AGGREGATION_DURATION_DAYS
-        - name: AGGREGATION_DURATION_ADVANCE
-          value: "1"
-        - name: COMMIT_INTERVAL_MS
-          value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
-        - name: JAVA_OPTS
-          value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
-        resources:
-          limits:
-            memory: "{{MEMORY_LIMIT}}"
-            cpu: "{{CPU_LIMIT}}"
-      - name: prometheus-jmx-exporter
-        image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
-        command:
-          - java
-          - -XX:+UnlockExperimentalVMOptions
-          - -XX:+UseCGroupMemoryLimitForHeap
-          - -XX:MaxRAMFraction=1
-          - -XshowSettings:vm
-          - -jar
-          - jmx_prometheus_httpserver.jar
-          - "5556"
-          - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
-        ports:
-          - containerPort: 5556
-        volumeMounts:
-          - name: jmx-config
-            mountPath: /etc/jmx-aggregation
-      volumes:
-        - name: jmx-config
-          configMap:
-            name: aggregation-jmx-configmap
diff --git a/execution/uc4-application/aggregation-service.yaml b/execution/uc4-application/aggregation-service.yaml
deleted file mode 100644
index 85432d04f225c30469f3232153ef6bd72bd02bdf..0000000000000000000000000000000000000000
--- a/execution/uc4-application/aggregation-service.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:  
-  name: titan-ccp-aggregation
-  labels:
-    app: titan-ccp-aggregation
-spec:
-  #type: NodePort
-  selector:    
-    app: titan-ccp-aggregation
-  ports:  
-  - name: http
-    port: 80
-    targetPort: 80
-    protocol: TCP
-  - name: metrics
-    port: 5556
diff --git a/execution/uc4-application/jmx-configmap.yaml b/execution/uc4-application/jmx-configmap.yaml
deleted file mode 100644
index 78496a86b1242a89b9e844ead3e700fd0b9a9667..0000000000000000000000000000000000000000
--- a/execution/uc4-application/jmx-configmap.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: aggregation-jmx-configmap
-data:
-  jmx-kafka-prometheus.yml: |+
-    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
-    lowercaseOutputName: true
-    lowercaseOutputLabelNames: true
-    ssl: false
diff --git a/execution/uc4-application/service-monitor.yaml b/execution/uc4-application/service-monitor.yaml
deleted file mode 100644
index 4e7e758cacb5086305efa26292ddef2afc958096..0000000000000000000000000000000000000000
--- a/execution/uc4-application/service-monitor.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-apiVersion: monitoring.coreos.com/v1
-kind: ServiceMonitor
-metadata:
-  labels:
-    app: titan-ccp-aggregation
-    appScope: titan-ccp
-  name: titan-ccp-aggregation
-spec:
-  selector:
-    matchLabels:
-        app: titan-ccp-aggregation
-  endpoints:
-    - port: metrics
-      interval: 10s
diff --git a/execution/uc4-workload-generator/deployment.yaml b/execution/uc4-workload-generator/deployment.yaml
deleted file mode 100644
index 98747b3922d439144e783b0e637cbe68e46f1b88..0000000000000000000000000000000000000000
--- a/execution/uc4-workload-generator/deployment.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-load-generator
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-load-generator
-  replicas: {{INSTANCES}}
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-load-generator
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: workload-generator
-        image: theodolite/theodolite-uc4-workload-generator:latest
-        env:
-        - name: ZK_HOST
-          value: "my-confluent-cp-zookeeper"
-        - name: ZK_PORT
-          value: "2181"
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
-        - name: NUM_SENSORS
-          value: "{{NUM_SENSORS}}"
-        - name: POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: INSTANCES
-          value: "{{INSTANCES}}"
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
deleted file mode 100644
index 29953ea141f55e3b8fc691d31b5ca8816d89fa87..0000000000000000000000000000000000000000
Binary files a/gradle/wrapper/gradle-wrapper.jar and /dev/null differ
diff --git a/uc3-application/build.gradle b/uc3-application/build.gradle
deleted file mode 100644
index 82df66fae434e5b0a0f9b31ef9a44f04ca857173..0000000000000000000000000000000000000000
--- a/uc3-application/build.gradle
+++ /dev/null
@@ -1,13 +0,0 @@
-allprojects {
-	repositories {
-    	maven {
-    		url 'https://packages.confluent.io/maven/'
-    	}
-	}
-}
-
-dependencies {
-    compile('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT')
-}
-
-mainClassName = "theodolite.uc3.application.HistoryService"
diff --git a/uc3-application/src/main/java/theodolite/uc3/application/ConfigurationKeys.java b/uc3-application/src/main/java/theodolite/uc3/application/ConfigurationKeys.java
deleted file mode 100644
index ab6f08c017bb78a72c4896d766b38f7b8485c7fb..0000000000000000000000000000000000000000
--- a/uc3-application/src/main/java/theodolite/uc3/application/ConfigurationKeys.java
+++ /dev/null
@@ -1,29 +0,0 @@
-package theodolite.uc3.application;
-
-/**
- * Keys to access configuration parameters.
- */
-public final class ConfigurationKeys {
-
-  public static final String APPLICATION_NAME = "application.name";
-
-  public static final String APPLICATION_VERSION = "application.version";
-
-  public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
-
-  public static final String KAFKA_OUTPUT_TOPIC = "kafka.output.topic";
-
-  public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
-
-  public static final String NUM_THREADS = "num.threads";
-
-  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
-
-  public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
-
-  public static final String KAFKA_WINDOW_DURATION_MINUTES = "kafka.window.duration.minutes";
-
-  private ConfigurationKeys() {
-  }
-
-}
diff --git a/uc4-application/build.gradle b/uc4-application/build.gradle
deleted file mode 100644
index c5891b2bfb2073e829ff7013e47a17b1ac2313e5..0000000000000000000000000000000000000000
--- a/uc4-application/build.gradle
+++ /dev/null
@@ -1,13 +0,0 @@
-allprojects {
-	repositories {
-    	maven {
-    		url 'https://packages.confluent.io/maven/'
-    	}
-	}
-}
-
-dependencies {
-    compile('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT')
-}
-
-mainClassName = "theodolite.uc4.application.HistoryService"