diff --git a/.gitignore b/.gitignore
index 71305e60a1056e58f281da4c2ab397539b63ba52..e898f39a2d997d01680ff63d1fe375b4b83b00b7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -33,3 +33,6 @@ tmp/
 
 # Python cache files
 *.pyc
+
+# Helm
+Chart.lock
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 9cf9b7686e06ddba07d010c64822b5f8b4a5a821..16dcb77ce0542edc6283fa100a820cb61940f4dc 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -16,31 +16,16 @@ stages:
     DOCKER_TLS_CERTDIR: "/certs"
 
 
-# Theodolite Framework
+# Theodolite Helm Chart
 
-deploy-theodolite:
-  stage: deploy
-  extends:
-    - .dind
-  script:
-    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
-    - docker build --pull -t theodolite ./execution
-    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite $CR_HOST/$CR_ORG/theodolite:${DOCKER_TAG_NAME}latest"
-    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite $CR_HOST/$CR_ORG/theodolite:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
-    - "[ $CI_COMMIT_TAG ] && docker tag theodolite $CR_HOST/$CR_ORG/theodolite:$CI_COMMIT_TAG"
-    - echo $CR_PW | docker login $CR_HOST -u $CR_USER --password-stdin
-    - docker push $CR_HOST/$CR_ORG/theodolite
-    - docker logout
-  rules:
-    - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - execution/**/*
-      if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
-      when: always
-    - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
-      when: manual
-      allow_failure: true
+lint-helm:
+  stage: check
+  image:
+    name: alpine/helm:3.5.2
+    entrypoint: [""]
+  tags:
+    - exec-docker
+  script: helm lint helm/
 
 
 # Theodolite Benchmarks
@@ -53,10 +38,11 @@ deploy-theodolite:
     GRADLE_OPTS: "-Dorg.gradle.daemon=false"
   cache:
     paths:
-      - .gradle
+      - .gradle/wrapper
+      - .gradle/caches
   before_script:
-    - cd benchmarks
     - export GRADLE_USER_HOME=`pwd`/.gradle
+    - cd theodolite-benchmarks
   rules:
     - if: $CI_COMMIT_TAG
       when: always
@@ -72,9 +58,10 @@ build-benchmarks:
   script: ./gradlew --build-cache assemble
   artifacts:
     paths:
-      - "benchmarks/build/libs/*.jar"
-      - "benchmarks/*/build/distributions/*.tar"
-    expire_in: 1 day
+      - "theodolite-benchmarks/build/libs/*.jar"
+      - "theodolite-benchmarks/*/build/libs/*.jar"
+      - "theodolite-benchmarks/*/build/distributions/*.tar"
+    expire_in: 6 hours
 
 test-benchmarks:
   stage: test
@@ -85,7 +72,7 @@ test-benchmarks:
   artifacts:
     reports:
       junit:
-        - "benchmarks/**/build/test-results/test/TEST-*.xml"
+        - "theodolite-benchmarks/**/build/test-results/test/TEST-*.xml"
 
 checkstyle-benchmarks:
   stage: check
@@ -96,7 +83,7 @@ checkstyle-benchmarks:
   script: ./gradlew checkstyle --continue
   artifacts:
     paths:
-      - "benchmarks/*/build/reports/checkstyle/main.html"
+      - "theodolite-benchmarks/*/build/reports/checkstyle/main.html"
     when: on_failure
     expire_in: 1 day
 
@@ -109,7 +96,7 @@ pmd-benchmarks:
   script: ./gradlew pmd --continue
   artifacts:
     paths:
-      - "benchmarks/*/build/reports/pmd/*.html"
+      - "theodolite-benchmarks/*/build/reports/pmd/*.html"
     when: on_failure
     expire_in: 1 day
 
@@ -122,7 +109,7 @@ spotbugs-benchmarks:
   script: ./gradlew spotbugs --continue
   artifacts:
     paths:
-      - "benchmarks/*/build/reports/spotbugs/*.html"
+      - "theodolite-benchmarks/*/build/reports/spotbugs/*.html"
     when: on_failure
     expire_in: 1 day
 
@@ -149,63 +136,263 @@ spotbugs-benchmarks:
     - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
       when: always
     - changes:
-      - benchmarks/*
-      - benchmarks/$JAVA_PROJECT_NAME/**/*
-      - benchmarks/application-kafkastreams-commons/**/*
-      - benchmarks/workload-generator-commons/**/*
+      - theodolite-benchmarks/*
+      - theodolite-benchmarks/$JAVA_PROJECT_NAME/**/*
+      - theodolite-benchmarks/kstreams-commons/**/*
+      - theodolite-benchmarks/flink-commons/**/*
+      - theodolite-benchmarks/load-generator-commons/**/*
       if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
       when: always
     - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
       when: manual
       allow_failure: true
 
-deploy-uc1-kstreams-app:
+deploy-uc1-kstreams:
   extends: .deploy-benchmarks
   variables:
     IMAGE_NAME: "theodolite-uc1-kstreams-app"
-    JAVA_PROJECT_NAME: "uc1-application"
+    JAVA_PROJECT_NAME: "uc1-kstreams"
 
-deploy-uc2-kstreams-app:
+deploy-uc2-kstreams:
   extends: .deploy-benchmarks
   variables:
     IMAGE_NAME: "theodolite-uc2-kstreams-app"
-    JAVA_PROJECT_NAME: "uc2-application"
+    JAVA_PROJECT_NAME: "uc2-kstreams"
 
-deploy-uc3-kstreams-app:
+deploy-uc3-kstreams:
   extends: .deploy-benchmarks
   variables:
     IMAGE_NAME: "theodolite-uc3-kstreams-app"
-    JAVA_PROJECT_NAME: "uc3-application"
+    JAVA_PROJECT_NAME: "uc3-kstreams"
 
-deploy-uc4-kstreams-app:
+deploy-uc4-kstreams:
   extends: .deploy-benchmarks
   variables:
     IMAGE_NAME: "theodolite-uc4-kstreams-app"
-    JAVA_PROJECT_NAME: "uc4-application"
+    JAVA_PROJECT_NAME: "uc4-kstreams"
+
+deploy-uc1-flink:
+  extends: .deploy-benchmarks
+  variables:
+    IMAGE_NAME: "theodolite-uc1-flink"
+    JAVA_PROJECT_NAME: "uc1-flink"
+
+deploy-uc2-flink:
+  extends: .deploy-benchmarks
+  variables:
+    IMAGE_NAME: "theodolite-uc2-flink"
+    JAVA_PROJECT_NAME: "uc2-flink"
+
+deploy-uc3-flink:
+  extends: .deploy-benchmarks
+  variables:
+    IMAGE_NAME: "theodolite-uc3-flink"
+    JAVA_PROJECT_NAME: "uc3-flink"
+
+deploy-uc4-flink:
+  extends: .deploy-benchmarks
+  variables:
+    IMAGE_NAME: "theodolite-uc4-flink"
+    JAVA_PROJECT_NAME: "uc4-flink"
 
 deploy-uc1-load-generator:
   extends: .deploy-benchmarks
   variables:
     IMAGE_NAME: "theodolite-uc1-workload-generator"
-    JAVA_PROJECT_NAME: "uc1-workload-generator"
+    JAVA_PROJECT_NAME: "uc1-load-generator"
 
 deploy-uc2-load-generator:
   extends: .deploy-benchmarks
   variables:
     IMAGE_NAME: "theodolite-uc2-workload-generator"
-    JAVA_PROJECT_NAME: "uc2-workload-generator"
+    JAVA_PROJECT_NAME: "uc2-load-generator"
 
 deploy-uc3-load-generator:
   extends: .deploy-benchmarks
   variables:
     IMAGE_NAME: "theodolite-uc3-workload-generator"
-    JAVA_PROJECT_NAME: "uc3-workload-generator"
+    JAVA_PROJECT_NAME: "uc3-load-generator"
 
 deploy-uc4-load-generator:
   extends: .deploy-benchmarks
   variables:
     IMAGE_NAME: "theodolite-uc4-workload-generator"
-    JAVA_PROJECT_NAME: "uc4-workload-generator"
+    JAVA_PROJECT_NAME: "uc4-load-generator"
+
+
+# Theodolite Framework
+
+.theodolite:
+  image:
+    name: ghcr.io/graalvm/native-image:java11-21.1.0
+    entrypoint: [""]
+  tags:
+    - exec-docker
+  variables:
+    GRADLE_OPTS: "-Dorg.gradle.daemon=false"
+  cache:
+    paths:
+      - .gradle/wrapper
+      - .gradle/caches
+  before_script:
+    - export GRADLE_USER_HOME=`pwd`/.gradle
+    - cd theodolite
+
+build-theodolite-jvm:
+  stage: build
+  extends: .theodolite
+  script: ./gradlew --build-cache assemble
+  artifacts:
+    paths:
+      - "theodolite/build/lib/*"
+      - "theodolite/build/*-runner.jar"
+    expire_in: 6 hours
+
+build-theodolite-native:
+  stage: build
+  extends: .theodolite
+  script:
+    - ./gradlew --build-cache assemble -Dquarkus.package.type=native
+  when: manual
+  artifacts:
+    paths:
+      - "theodolite/build/*-runner"
+    expire_in: 6 hours
+
+test-theodolite:
+  stage: test
+  extends: .theodolite
+  needs:
+    - build-theodolite-jvm
+    #- build-theodolite-native
+  script: ./gradlew test --stacktrace
+  artifacts:
+    reports:
+      junit:
+        - "theodolite/**/build/test-results/test/TEST-*.xml"
+
+# Disabled for now
+.ktlint-theodolite:
+  stage: check
+  extends: .theodolite
+  needs:
+    - build-theodolite-jvm
+    - test-theodolite
+  script: ./gradlew ktlintCheck --continue
+
+# Disabled for now
+.detekt-theodolite: 
+  stage: check
+  extends: .theodolite
+  needs:
+    - build-theodolite-jvm
+    - test-theodolite
+  script: ./gradlew detekt --continue
+
+deploy-theodolite:
+  stage: deploy
+  extends:
+    - .theodolite
+    - .dind
+  needs:
+    #- build-theodolite-native
+    - build-theodolite-jvm
+    - test-theodolite
+  script:
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    #- docker build -f src/main/docker/Dockerfile.native -t theodolite .
+    - docker build -f src/main/docker/Dockerfile.jvm -t theodolite .
+    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite $CR_HOST/$CR_ORG/theodolite:${DOCKER_TAG_NAME}latest"
+    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite $CR_HOST/$CR_ORG/theodolite:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
+    - "[ $CI_COMMIT_TAG ] && docker tag theodolite $CR_HOST/$CR_ORG/theodolite:$CI_COMMIT_TAG"
+    - echo $CR_PW | docker login $CR_HOST -u $CR_USER --password-stdin
+    - docker push $CR_HOST/$CR_ORG/theodolite
+    - docker logout
+  rules:
+    - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - theodolite/**/*
+      if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
+      when: always
+    - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
+      when: manual
+      allow_failure: true
+
+
+# Theodolite SLO Checker
+
+test-slo-checker-lag-trend:
+  stage: test
+  image: python:3.7-slim
+  tags:
+    - exec-docker
+  script:
+    - cd slo-checker/record-lag
+    - pip install -r requirements.txt
+    - cd app
+    - python -m unittest
+
+test-slo-checker-dropped-records-kstreams:
+  stage: test
+  image: python:3.7-slim
+  tags:
+    - exec-docker
+  script:
+    - cd slo-checker/dropped-records
+    - pip install -r requirements.txt
+    - cd app
+    - python -m unittest
+
+deploy-slo-checker-lag-trend:
+  stage: deploy
+  extends:
+    - .dind
+  needs:
+    - test-slo-checker-lag-trend
+  script:
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    - docker build --pull -t theodolite-slo-checker-lag-trend slo-checker/record-lag
+    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite-slo-checker-lag-trend $CR_HOST/$CR_ORG/theodolite-slo-checker-lag-trend:${DOCKER_TAG_NAME}latest"
+    - "[ $CI_COMMIT_TAG ] && docker tag theodolite-slo-checker-lag-trend $CR_HOST/$CR_ORG/theodolite-slo-checker-lag-trend:$CI_COMMIT_TAG"
+    - echo $CR_PW | docker login $CR_HOST -u $CR_USER --password-stdin
+    - docker push $CR_HOST/$CR_ORG/theodolite-slo-checker-lag-trend
+    - docker logout
+  rules:
+    - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - slo-checker/record-lag/**/*
+      if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
+      when: always
+    - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
+      when: manual
+      allow_failure: true
+
+deploy-slo-checker-dropped-records-kstreams:
+  stage: deploy
+  extends:
+    - .dind
+  needs:
+    - test-slo-checker-dropped-records-kstreams
+  script:
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    - docker build --pull -t theodolite-slo-checker-dropped-records-kstreams slo-checker/dropped-records
+    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite-slo-checker-dropped-records-kstreams $CR_HOST/$CR_ORG/theodolite-slo-checker-dropped-records-kstreams:${DOCKER_TAG_NAME}latest"
+    - "[ $CI_COMMIT_TAG ] && docker tag theodolite-slo-checker-dropped-records-kstreams $CR_HOST/$CR_ORG/theodolite-slo-checker-dropped-records-kstreams:$CI_COMMIT_TAG"
+    - echo $CR_PW | docker login $CR_HOST -u $CR_USER --password-stdin
+    - docker push $CR_HOST/$CR_ORG/theodolite-slo-checker-dropped-records-kstreams
+    - docker logout
+  rules:
+    - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - slo-checker/dropped-records/**/*
+      if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
+      when: always
+    - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
+      when: manual
+      allow_failure: true
 
 
 # Theodolite Random Scheduler
@@ -214,6 +401,7 @@ deploy-random-scheduler:
   stage: deploy
   extends:
     - .dind
+  needs: []
   script:
     - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
     - docker build --pull -t theodolite-random-scheduler execution/infrastructure/random-scheduler
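Note on the SLO checkers introduced above: the record-lag checker evaluates whether the slope of a linear regression fitted to the consumer lag over time stays below a tolerable threshold (the same lag-trend idea used in `analysis/src/demand.py`). A minimal sketch of that core check follows; the function name and signature are hypothetical (the actual service under `slo-checker/record-lag` is an HTTP app) and the numbers are synthetic:

```python
# Minimal sketch of the record-lag SLO check's core idea; the function
# name and signature are hypothetical, not the service's actual API.
import numpy as np
from sklearn.linear_model import LinearRegression

def lag_trend_slope_ok(timestamps_sec, lag_values, threshold):
    """True if the fitted lag trend slope stays below the threshold."""
    X = np.array(timestamps_sec).reshape(-1, 1)  # seconds since start
    Y = np.array(lag_values).reshape(-1, 1)      # queued messages
    model = LinearRegression().fit(X, Y)
    return model.coef_[0][0] < threshold

# Lag grows by roughly 5 messages/s; a slope of up to 2000 msgs/s is tolerated.
print(lag_trend_slope_ok([0, 60, 120, 180], [0, 300, 610, 900], 2000))  # True
```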
diff --git a/CITATION.cff b/CITATION.cff
index ae409536b477586aaabde687b0bfbaef1ae422d3..ca94e1c5039d3aeac3a4535767d5217de4960a6f 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -1,16 +1,29 @@
 cff-version: "1.1.0"
 message: "If you use Theodolite, please cite it using these metadata."
 authors: 
-  -
-    family-names: Henning
+  - family-names: Henning
     given-names: "Sören"
     orcid: "https://orcid.org/0000-0001-6912-2549"
-  -
-    family-names: Hasselbring
+  - family-names: Hasselbring
     given-names: Wilhelm
     orcid: "https://orcid.org/0000-0001-6625-4335"
 title: Theodolite
-version: "0.3.0"
+version: "0.5.1"
 repository-code: "https://github.com/cau-se/theodolite"
 license: "Apache-2.0"
 doi: "10.1016/j.bdr.2021.100209"
+preferred-citation:
+  type: article
+  authors: 
+    - family-names: Henning
+      given-names: "Sören"
+      orcid: "https://orcid.org/0000-0001-6912-2549"
+    - family-names: Hasselbring
+      given-names: Wilhelm
+      orcid: "https://orcid.org/0000-0001-6625-4335"
+  doi: "10.1016/j.bdr.2021.100209"
+  journal: "Big Data Research"
+  month: 7
+  title: "Theodolite: Scalability Benchmarking of Distributed Stream Processing Engines in Microservice Architectures"
+  volume: 25
+  year: 2021
diff --git a/README.md b/README.md
index 9dcceb9e65a8a50d96e579a1d14c9861eb22cc82..804a193df21f3883ecf9a727af5a743b77a9cceb 100644
--- a/README.md
+++ b/README.md
@@ -4,20 +4,17 @@
 
 Theodolite is a framework for benchmarking the horizontal and vertical scalability of stream processing engines. It consists of three modules:
 
-## Theodolite Benchmarks
-
-Theodolite contains 4 application benchmarks, which are based on typical use cases for stream processing within microservices. For each benchmark, a corresponding workload generator is provided. Currently, this repository provides benchmark implementations for Apache Kafka Streams. Benchmark implementation for Apache Flink are currently under development and can be found in the *apache-flink* branch of this repository. The benchmark sources can be found in [Thedolite benchmarks](benchmarks).
-
-
-## Theodolite Execution Framework
-
-Theodolite aims to benchmark scalability of stream processing engines for real use cases. Microservices that apply stream processing techniques are usually deployed in elastic cloud environments. Hence, Theodolite's cloud-native benchmarking framework deploys its components in a cloud environment, orchestrated by Kubernetes. More information on how to execute scalability benchmarks can be found in [Thedolite execution framework](execution).
+## Theodolite Benchmarking Tool
 
+Theodolite aims to benchmark the scalability of stream processing engines for real use cases. Microservices that apply stream processing techniques are usually deployed in elastic cloud environments. Hence, Theodolite's cloud-native benchmarking framework deploys its components in a cloud environment, orchestrated by Kubernetes. It is recommended to install Theodolite with the package manager Helm. The Theodolite Helm chart, along with instructions on how to install it, can be found in the [`helm`](helm) directory.
 
 ## Theodolite Analysis Tools
 
-Theodolite's benchmarking method creates a *scalability graph* allowing to draw conclusions about the scalability of a stream processing engine or its deployment. A scalability graph shows how resource demand evolves with an increasing workload. Theodolite provides Jupyter notebooks for creating such scalability graphs based on benchmarking results from the execution framework. More information can be found in [Theodolite analysis tool](analysis).
+Theodolite's benchmarking method maps load intensities to the resource amounts that are required for processing them. A plot showing how resource demand evolves with increasing load allows conclusions to be drawn about the scalability of a stream processing engine or its deployment. Theodolite provides Jupyter notebooks for creating such plots based on benchmarking results from the execution framework. More information can be found in [Theodolite analysis tool](analysis).
+
+## Theodolite Benchmarks
 
+Theodolite comes with 4 application benchmarks, which are based on typical use cases for stream processing within microservices. For each benchmark, a corresponding load generator is provided. Currently, this repository provides benchmark implementations for Apache Kafka Streams and Apache Flink. The benchmark sources can be found in [Theodolite benchmarks](theodolite-benchmarks).
 
 ## How to Cite
 
diff --git a/analysis/demand-metric-plot.ipynb b/analysis/demand-metric-plot.ipynb
index 90ef227dbf6a4566760329b615d5f59b4cc2bc25..71e08f0590f819a63b1bdd6bf13b57ac665f65bc 100644
--- a/analysis/demand-metric-plot.ipynb
+++ b/analysis/demand-metric-plot.ipynb
@@ -1,22 +1,22 @@
 {
  "cells": [
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "# Theodolite Analysis - Plotting the Demand Metric\n",
     "\n",
     "This notebook creates a plot, showing scalability as a function that maps load intensities to the resources required for processing them. It is able to combine multiple such plots in one figure, for example, to compare multiple systems or configurations.\n",
     "\n",
     "The notebook takes a CSV file for each plot mapping load intensities to minimum required resources, computed by the `demand-metric-plot.ipynb` notebook."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "First, we need to import some libraries, which are required for creating the plots."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -33,11 +33,11 @@
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "We need to specify the directory, where the demand CSV files can be found, and a dictionary that maps a system description (e.g. its name) to the corresponding CSV file (prefix). To use Unicode narrow non-breaking spaces in the description format it as `u\"1000\\u202FmCPU\"`."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -53,11 +53,11 @@
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "Now, we combie all systems described in `experiments`."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -71,11 +71,11 @@
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "We might want to display the mappings before we plot it."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -87,11 +87,11 @@
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "The following code creates a MatPlotLib figure showing the scalability plots for all specified systems. You might want to adjust its styling etc. according to your preferences. Make sure to also set a filename."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -149,27 +149,33 @@
   }
  ],
  "metadata": {
+  "file_extension": ".py",
+  "interpreter": {
+   "hash": "e9e076445e1891a25f59b525adcc71b09846b3f9cf034ce4147fc161b19af121"
+  },
+  "kernelspec": {
+   "display_name": "Python 3.8.10 64-bit ('.venv': venv)",
+   "name": "python3"
+  },
   "language_info": {
-   "name": "python",
    "codemirror_mode": {
     "name": "ipython",
     "version": 3
    },
-   "version": "3.8.5-final"
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
   },
-  "orig_nbformat": 2,
-  "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "npconvert_exporter": "python",
+  "orig_nbformat": 2,
   "pygments_lexer": "ipython3",
-  "version": 3,
-  "kernelspec": {
-   "name": "python37064bitvenvvenv6c432ee1239d4f3cb23f871068b0267d",
-   "display_name": "Python 3.7.0 64-bit ('.venv': venv)",
-   "language": "python"
-  }
+  "version": 3
  },
  "nbformat": 4,
  "nbformat_minor": 2
-}
\ No newline at end of file
+}
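For orientation, the following is a minimal sketch of what the plotting cells in this notebook do, assuming demand CSVs with `load` and `resources` columns (as produced by `analysis/src/demand.py`); the directory and file names are illustrative:

```python
# Sketch of the plotting step described in this notebook. Column names
# 'load' and 'resources' follow analysis/src/demand.py; the directory
# and file names are illustrative.
import os
import pandas as pd
import matplotlib.pyplot as plt

results_dir = 'results'  # directory containing the demand CSV files
experiments = {          # maps a system description to a CSV file prefix
    'System A': 'exp1',
    'System B': 'exp2',
}

plt.figure()
for description, prefix in experiments.items():
    df = pd.read_csv(os.path.join(results_dir, f'{prefix}_demand.csv'))
    plt.plot(df['load'], df['resources'], marker='o', label=description)
plt.xlabel('load intensity')
plt.ylabel('resource demand')
plt.legend()
plt.savefig('scalability-plot.pdf')  # make sure to set a filename
```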
diff --git a/analysis/demand-metric.ipynb b/analysis/demand-metric.ipynb
index bcea129b7cb07465fa99f32b6f8b2b6115e8a0aa..fbf3ee02960a1e06457eef5dda96cb6d0a1a75ac 100644
--- a/analysis/demand-metric.ipynb
+++ b/analysis/demand-metric.ipynb
@@ -1,6 +1,8 @@
 {
  "cells": [
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "# Theodolite Analysis - Demand Metric\n",
     "\n",
@@ -9,11 +11,11 @@
     "Theodolite's *demand* metric is a function, mapping load intensities to the minimum required resources (e.g., instances) that are required to process this load. With this notebook, the *demand* metric function is approximated by a map of tested load intensities to their minimum required resources.\n",
     "\n",
     "The final output when running this notebook will be a CSV file, providig this mapping. It can be used to create nice plots of a system's scalability using the `demand-metric-plot.ipynb` notebook."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "In the following cell, we need to specifiy:\n",
     "\n",
@@ -22,9 +24,7 @@
     "* `max_lag_trend_slope`: The maximum tolerable increase in queued messages per second.\n",
     "* `measurement_dir`: The directory where the measurement data files are to be found.\n",
     "* `results_dir`: The directory where the computed demand CSV files are to be stored."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -40,11 +40,11 @@
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "With the following call, we compute our demand mapping."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -58,11 +58,11 @@
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "We might already want to plot a simple visualization here:"
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -74,11 +74,11 @@
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "Finally we store the results in a CSV file."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -93,27 +93,33 @@
   }
  ],
  "metadata": {
+  "file_extension": ".py",
+  "interpreter": {
+   "hash": "e9e076445e1891a25f59b525adcc71b09846b3f9cf034ce4147fc161b19af121"
+  },
+  "kernelspec": {
+   "display_name": "Python 3.8.10 64-bit ('.venv': venv)",
+   "name": "python3"
+  },
   "language_info": {
-   "name": "python",
    "codemirror_mode": {
     "name": "ipython",
     "version": 3
    },
-   "version": "3.8.5-final"
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
   },
-  "orig_nbformat": 2,
-  "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "npconvert_exporter": "python",
+  "orig_nbformat": 2,
   "pygments_lexer": "ipython3",
-  "version": 3,
-  "kernelspec": {
-   "name": "python37064bitvenvvenv6c432ee1239d4f3cb23f871068b0267d",
-   "display_name": "Python 3.7.0 64-bit ('.venv': venv)",
-   "language": "python"
-  }
+  "version": 3
  },
  "nbformat": 4,
  "nbformat_minor": 2
-}
\ No newline at end of file
+}
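As a compact reference for the flow this notebook implements, here is a sketch under stated assumptions: the import path, parameter values, and output file name are illustrative, while the `demand()` signature matches `analysis/src/demand.py`:

```python
# Sketch of this notebook's flow, assuming it runs from the analysis/
# directory; the import path and output file name are illustrative.
from src.demand import demand

exp_id = 1                    # ID of the experiment to evaluate
warmup_sec = 60               # discard measurements within the warm-up period
max_lag_trend_slope = 2000.0  # max tolerable increase in queued messages per second
measurement_dir = 'measurement-data'
results_dir = 'results'

demand_per_load = demand(exp_id, measurement_dir, max_lag_trend_slope, warmup_sec)
demand_per_load.to_csv(f'{results_dir}/exp{exp_id}_demand.csv', index=False)
```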
diff --git a/analysis/src/demand.py b/analysis/src/demand.py
index dfb20c05af8e9a134eedd2cdb584c961a82369f5..2178ab7c5dc5f7e4c04ebb58d4c14c9bf8b1aeff 100644
--- a/analysis/src/demand.py
+++ b/analysis/src/demand.py
@@ -1,59 +1,51 @@
 import os
 from datetime import datetime, timedelta, timezone
 import pandas as pd
+from pandas.core.frame import DataFrame
 from sklearn.linear_model import LinearRegression
 
 def demand(exp_id, directory, threshold, warmup_sec):
     raw_runs = []
 
-    # Compute SL, i.e., lag trend, for each tested configuration
-    filenames = [filename for filename in os.listdir(directory) if filename.startswith(f"exp{exp_id}") and filename.endswith("totallag.csv")]
+    # Compute SLI, i.e., lag trend, for each tested configuration
+    filenames = [filename for filename in os.listdir(directory) if filename.startswith(f"exp{exp_id}") and "lag-trend" in filename and filename.endswith(".csv")]
     for filename in filenames:
-        #print(filename)
         run_params = filename[:-4].split("_")
-        dim_value = run_params[2]
-        instances = run_params[3]
+        dim_value = run_params[1]
+        instances = run_params[2]
 
         df = pd.read_csv(os.path.join(directory, filename))
-        #input = df.loc[df['topic'] == "input"]
         input = df
-        #print(input)
+
         input['sec_start'] = input.loc[0:, 'timestamp'] - input.iloc[0]['timestamp']
-        #print(input)
-        #print(input.iloc[0, 'timestamp'])
+
         regress = input.loc[input['sec_start'] >= warmup_sec] # Warm-Up
-        #regress = input
 
-        #input.plot(kind='line',x='timestamp',y='value',color='red')
-        #plt.show()
+        X = regress.iloc[:, 1].values.reshape(-1, 1)  # .values converts the column into a NumPy array
+        Y = regress.iloc[:, 2].values.reshape(-1, 1)  # -1 lets NumPy infer the number of rows; 1 column
 
-        X = regress.iloc[:, 2].values.reshape(-1, 1)  # values converts it into a numpy array
-        Y = regress.iloc[:, 3].values.reshape(-1, 1)  # -1 means that calculate the dimension of rows, but have 1 column
         linear_regressor = LinearRegression()  # create object for the class
         linear_regressor.fit(X, Y)  # perform linear regression
         Y_pred = linear_regressor.predict(X)  # make predictions
 
         trend_slope = linear_regressor.coef_[0][0]
-        #print(linear_regressor.coef_)
 
         row = {'load': int(dim_value), 'resources': int(instances), 'trend_slope': trend_slope}
-        #print(row)
         raw_runs.append(row)
 
     runs = pd.DataFrame(raw_runs)
 
-    # Set suitable = True if SLOs are met, i.e., lag trend is below threshold
-    runs["suitable"] =  runs.apply(lambda row: row['trend_slope'] < threshold, axis=1)
-
-    # Sort results table (unsure if required)
-    runs.columns = runs.columns.str.strip()
-    runs.sort_values(by=["load", "resources"])
+    # Group by load and resources to handle repetitions, taking the median across repetitions;
+    # for an even number of repetitions, the mean of the two middle values is used
+    medians = runs.groupby(by=['load', 'resources'], as_index=False).median()
 
-    # Filter only suitable configurations
-    filtered = runs[runs.apply(lambda x: x['suitable'], axis=1)]
-
-    # Compute demand per load intensity
-    grouped = filtered.groupby(['load'])['resources'].min()
-    demand_per_load = grouped.to_frame().reset_index()
+    # Set suitable = True if SLOs are met, i.e., lag trend slope is below threshold
+    medians["suitable"] =  medians.apply(lambda row: row['trend_slope'] < threshold, axis=1)
 
+    suitable = medians[medians.apply(lambda x: x['suitable'], axis=1)]
+
+    # Compute minimal demand per load intensity
+    demand_per_load = suitable.groupby(by=['load'], as_index=False)['resources'].min()
+
     return demand_per_load
+
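To make the new aggregation concrete, here is a toy run of the same pipeline (synthetic numbers, threshold chosen for illustration; not part of the module):

```python
# Toy demonstration of the aggregation pipeline added above: repetitions
# are reduced to their median slope, configurations meeting the SLO are
# kept, and the minimum resources per load intensity remain.
import pandas as pd

runs = pd.DataFrame([
    {'load': 100, 'resources': 1, 'trend_slope': 0.5},  # repetition 1
    {'load': 100, 'resources': 1, 'trend_slope': 5.0},  # repetition 2
    {'load': 100, 'resources': 2, 'trend_slope': 0.1},
])
threshold = 1.0

medians = runs.groupby(by=['load', 'resources'], as_index=False).median()
suitable = medians[medians['trend_slope'] < threshold]
print(suitable.groupby(by=['load'], as_index=False)['resources'].min())
# The single-instance runs have a median slope of 2.75 and fail the SLO,
# so the demand for load 100 is 2 instances.
```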
diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle
deleted file mode 100644
index ea8fb80bb2c2bac6121dbaaf72f742aa0e9c62bb..0000000000000000000000000000000000000000
--- a/benchmarks/build.gradle
+++ /dev/null
@@ -1,178 +0,0 @@
-// Inherited to all subprojects
-buildscript {
-  repositories {
-    maven {
-      url "https://plugins.gradle.org/m2/"
-    }
-  }
-  dependencies {
-    classpath "gradle.plugin.com.github.spotbugs.snom:spotbugs-gradle-plugin:4.6.0"
-  }
-}
-
-// Variables used to distinct different subprojects
-def useCaseProjects = subprojects.findAll {it -> it.name.matches('uc(.)*')}
-def useCaseApplications = subprojects.findAll {it -> it.name.matches('uc[0-9]+-application')}
-def useCaseGenerators = subprojects.findAll {it -> it.name.matches('uc[0-9]+-workload-generator*')}
-def commonProjects = subprojects.findAll {it -> it.name.matches('(.)*commons(.)*')}
-
-// Plugins
-allprojects {
-  apply plugin: 'eclipse'
-}
-
-subprojects {
-  apply plugin: 'checkstyle'
-  apply plugin: 'pmd'
-  apply plugin: 'com.github.spotbugs'
-  apply plugin: 'java-library'
-}
-
-configure(useCaseProjects){
-    apply plugin: 'application'
-}
-
-// Java version for all subprojects
-subprojects {
-  java {
-    sourceCompatibility = JavaVersion.VERSION_11
-    targetCompatibility = JavaVersion.VERSION_11
-  }
-}
-
-// Check for updates every build
-configurations.all {
-    resolutionStrategy.cacheChangingModulesFor 0, 'seconds'
-}
-
-// Repositories for all projects
-allprojects {
-	repositories {
-	    jcenter()
-	    maven {
-	    	url "https://oss.sonatype.org/content/repositories/snapshots/"
-	    }
-      maven {
-        url 'https://packages.confluent.io/maven/'
-    }
-	}
-}
-
-// Dependencies for all use case applications
-configure(useCaseApplications) {
-  dependencies {
-      // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
-      implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
-      implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
-      implementation 'org.apache.kafka:kafka-streams:2.6.0' // enable TransformerSuppliers
-      implementation 'com.google.code.gson:gson:2.8.2'
-      implementation 'com.google.guava:guava:24.1-jre'
-      implementation 'org.slf4j:slf4j-simple:1.7.25'
-      implementation project(':application-kafkastreams-commons')
-
-      // Use JUnit test framework
-      testImplementation 'junit:junit:4.12'
-  }
-}
-
-// Dependencies for all use case generators
-configure(useCaseGenerators) {
-  dependencies {
-      // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
-      implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
-      implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
-      implementation 'org.slf4j:slf4j-simple:1.7.25'
-
-      // These dependencies are used for the workload-generator-commmon
-      implementation project(':workload-generator-commons')
-
-      // Use JUnit test framework
-      testImplementation 'junit:junit:4.12'
-  }
-}
-
-// Dependencies for all commons
-configure(commonProjects) {
-  dependencies {
-      // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
-      implementation 'org.slf4j:slf4j-simple:1.7.25'
-      implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
-      implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
-      implementation 'org.apache.kafka:kafka-streams:2.6.0'
-
-      // Use JUnit test framework
-      testImplementation 'junit:junit:4.12'
-  }
-}
-
-// Per default XML reports for SpotBugs are generated
-// Include this to generate HTML reports
-tasks.withType(com.github.spotbugs.snom.SpotBugsTask) {
-  reports {
-    // Either HTML or XML reports can be activated
-    html.enabled true
-    xml.enabled false
-  }
-}
-
-// Subprojects quality tools tasks
-subprojects {
-  task pmd {
-    group 'Quality Assurance'
-    description 'Run PMD'
-
-    dependsOn 'pmdMain'
-    dependsOn 'pmdTest'
-  }
-
-  task checkstyle {
-    group 'Quality Assurance'
-    description 'Run Checkstyle'
-
-    dependsOn 'checkstyleMain'
-    dependsOn 'checkstyleTest'
-  }
-
-  task spotbugs {
-    group 'Quality Assurance'
-    description 'Run SpotBugs'
-
-    dependsOn 'spotbugsMain'
-    dependsOn 'spotbugsTest'
-  }
-}
-
-// Subprojects quality tools configuration
-subprojects {
-  pmd {
-    ruleSets = [] // Gradle requires to clean the rule sets first
-    ruleSetFiles = files("$rootProject.projectDir/config/pmd.xml")
-    ignoreFailures = false
-    toolVersion = "6.7.0"
-  }
-
-  checkstyle {
-    configDirectory = file("$rootProject.projectDir/config")
-    configFile = file("$rootProject.projectDir/config/checkstyle.xml")
-    maxWarnings = 0
-    ignoreFailures = false
-    toolVersion = "8.12"
-  }
-
-  spotbugs {
-    excludeFilter = file("$rootProject.projectDir/config/spotbugs-exclude-filter.xml")
-    reportLevel = "low"
-    effort = "max"
-    ignoreFailures = false
-    toolVersion = '4.1.4'
-  }
-}
-
-allprojects {
-  eclipse {
-      classpath {
-         downloadSources=true
-         downloadJavadoc=true
-      }
-  }
-}
diff --git a/benchmarks/gradle/wrapper/gradle-wrapper.jar b/benchmarks/gradle/wrapper/gradle-wrapper.jar
deleted file mode 100644
index 457aad0d98108420a977756b7145c93c8910b076..0000000000000000000000000000000000000000
Binary files a/benchmarks/gradle/wrapper/gradle-wrapper.jar and /dev/null differ
diff --git a/benchmarks/settings.gradle b/benchmarks/settings.gradle
deleted file mode 100644
index 9104525ce160a25957f9731f820a723b4f36f7d5..0000000000000000000000000000000000000000
--- a/benchmarks/settings.gradle
+++ /dev/null
@@ -1,16 +0,0 @@
-rootProject.name = 'scalability-benchmarking'
-
-include 'workload-generator-commons'
-include 'application-kafkastreams-commons'
-
-include 'uc1-workload-generator'
-include 'uc1-application'
-
-include 'uc2-workload-generator'
-include 'uc2-application'
-
-include 'uc3-workload-generator'
-include 'uc3-application'
-
-include 'uc4-workload-generator'
-include 'uc4-application'
diff --git a/benchmarks/uc1-workload-generator/Dockerfile b/benchmarks/uc1-workload-generator/Dockerfile
deleted file mode 100644
index 91f18d740fa87d7b03480a3352a1fa0eccc845db..0000000000000000000000000000000000000000
--- a/benchmarks/uc1-workload-generator/Dockerfile
+++ /dev/null
@@ -1,6 +0,0 @@
-FROM openjdk:11-slim
-
-ADD build/distributions/uc1-workload-generator.tar /
-
-CMD  JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
-     /uc1-workload-generator/bin/uc1-workload-generator
\ No newline at end of file
diff --git a/benchmarks/uc2-workload-generator/Dockerfile b/benchmarks/uc2-workload-generator/Dockerfile
deleted file mode 100644
index 55593e0295efb0c4f7d4c484b1b104c256f9b958..0000000000000000000000000000000000000000
--- a/benchmarks/uc2-workload-generator/Dockerfile
+++ /dev/null
@@ -1,6 +0,0 @@
-FROM openjdk:11-slim
-
-ADD build/distributions/uc2-workload-generator.tar /
-
-CMD  JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
-     /uc2-workload-generator/bin/uc2-workload-generator
\ No newline at end of file
diff --git a/benchmarks/uc3-workload-generator/Dockerfile b/benchmarks/uc3-workload-generator/Dockerfile
deleted file mode 100644
index 8422c9d5371b86ced0a38c141c461aef452133ac..0000000000000000000000000000000000000000
--- a/benchmarks/uc3-workload-generator/Dockerfile
+++ /dev/null
@@ -1,6 +0,0 @@
-FROM openjdk:11-slim
-
-ADD build/distributions/uc3-workload-generator.tar /
-
-CMD  JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
-     /uc3-workload-generator/bin/uc3-workload-generator
diff --git a/benchmarks/uc4-workload-generator/Dockerfile b/benchmarks/uc4-workload-generator/Dockerfile
deleted file mode 100644
index f39923e59d3079d3b163ffc5d2e4906599de026d..0000000000000000000000000000000000000000
--- a/benchmarks/uc4-workload-generator/Dockerfile
+++ /dev/null
@@ -1,6 +0,0 @@
-FROM openjdk:11-slim
-
-ADD build/distributions/uc4-workload-generator.tar /
-
-CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
-     /uc4-workload-generator/bin/uc4-workload-generator
diff --git a/benchmarks/workload-generator-commons/build.gradle b/benchmarks/workload-generator-commons/build.gradle
deleted file mode 100644
index 98d820b480ba0b357b74f82ebce5a647ee392461..0000000000000000000000000000000000000000
--- a/benchmarks/workload-generator-commons/build.gradle
+++ /dev/null
@@ -1,5 +0,0 @@
-dependencies {
-  implementation 'com.google.guava:guava:30.1-jre'
-  implementation 'com.hazelcast:hazelcast:4.1.1'
-  implementation 'com.hazelcast:hazelcast-kubernetes:2.2.1'
-}
\ No newline at end of file
diff --git a/codemeta.json b/codemeta.json
index eff1f1ba4f3c9a70a46c3cf83c47c279e1838cf9..a158e30eb7f1ab433779678aba3a1cc3b7e33c80 100644
--- a/codemeta.json
+++ b/codemeta.json
@@ -5,10 +5,10 @@
     "codeRepository": "https://github.com/cau-se/theodolite",
     "dateCreated": "2020-03-13",
     "datePublished": "2020-07-27",
-    "dateModified": "2021-02-11",
+    "dateModified": "2021-11-12",
     "downloadUrl": "https://github.com/cau-se/theodolite/releases",
     "name": "Theodolite",
-    "version": "0.3.0",
+    "version": "0.5.1",
     "description": "Theodolite is a framework for benchmarking the horizontal and vertical scalability of stream processing engines.",
     "developmentStatus": "active",
     "referencePublication": "https://doi.org/10.1016/j.bdr.2021.100209",
diff --git a/docs/CNAME b/docs/CNAME
new file mode 100644
index 0000000000000000000000000000000000000000..b1c7ffdbcd7523245c451869092ff0498bd7b8db
--- /dev/null
+++ b/docs/CNAME
@@ -0,0 +1 @@
+www.theodolite.rocks
\ No newline at end of file
diff --git a/docs/README.md b/docs/README.md
index 4fd13bdfc157efe8b3491695bb83972f96a82c5d..eb0848d52ec4235c6325ba0a373ea2628e52a102 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -10,16 +10,20 @@ permalink: /
 
 Theodolite is a framework for benchmarking the horizontal and vertical scalability of stream processing engines. It consists of three modules:
 
-## Theodolite Benchmarks
+## Theodolite Benchmarking Tool
 
-Theodolite contains 4 application benchmarks, which are based on typical use cases for stream processing within microservices. For each benchmark, a corresponding workload generator is provided. Currently, this repository provides benchmark implementations for Kafka Streams.
+Theodolite aims to benchmark the scalability of stream processing engines for real use cases. Microservices that apply stream processing techniques are usually deployed in elastic cloud environments. Hence, Theodolite's cloud-native benchmarking framework deploys its components in a cloud environment, orchestrated by Kubernetes. It is recommended to install Theodolite with the package manager Helm. The Theodolite Helm chart, along with instructions on how to install it, can be found in the [`helm`](helm) directory.
 
+## Theodolite Analysis Tools
 
-## Theodolite Execution Framework
+Theodolite's benchmarking method maps load intensities to the resource amounts that are required for processing them. A plot showing how resource demand evolves with increasing load allows conclusions to be drawn about the scalability of a stream processing engine or its deployment. Theodolite provides Jupyter notebooks for creating such plots based on benchmarking results from the execution framework. More information can be found in [Theodolite analysis tool](analysis).
 
-Theodolite aims to benchmark scalability of stream processing engines for real use cases. Microservices that apply stream processing techniques are usually deployed in elastic cloud environments. Hence, Theodolite's cloud-native benchmarking framework deploys as components in a cloud environment, orchestrated by Kubernetes. More information on how to execute scalability benchmarks can be found in [Thedolite execution framework](execution).
+## Theodolite Benchmarks
 
+Theodolite comes with 4 application benchmarks, which are based on typical use cases for stream processing within microservices. For each benchmark, a corresponding load generator is provided. Currently, this repository provides benchmark implementations for Apache Kafka Streams and Apache Flink. The benchmark sources can be found in [Theodolite benchmarks](theodolite-benchmarks).
 
-## Theodolite Analysis Tools
+## How to Cite
+
+If you use Theodolite, please cite
 
-Theodolite's benchmarking method create a *scalability graph* allowing to draw conclusions about the scalability of a stream processing engine or its deployment. A scalability graph shows how resource demand evolves with an increasing workload. Theodolite provides Jupyter notebooks for creating such scalability graphs based on benchmarking results from the execution framework. More information can be found in [Theodolite analysis tool](analysis).
+> Sören Henning and Wilhelm Hasselbring. (2021). Theodolite: Scalability Benchmarking of Distributed Stream Processing Engines in Microservice Architectures. Big Data Research, Volume 25. DOI: [10.1016/j.bdr.2021.100209](https://doi.org/10.1016/j.bdr.2021.100209). arXiv:[2009.00304](https://arxiv.org/abs/2009.00304).
diff --git a/docs/crd-docu.md b/docs/crd-docu.md
new file mode 100644
index 0000000000000000000000000000000000000000..73d85c951fc2958aee25cde2cdff652034643c1a
--- /dev/null
+++ b/docs/crd-docu.md
@@ -0,0 +1,1226 @@
+# API Reference
+
+Packages:
+
+- [theodolite.com/v1](#theodolitecomv1)
+
+# theodolite.com/v1
+
+Resource Types:
+
+- [benchmark](#benchmark)
+
+- [execution](#execution)
+
+
+
+
+## benchmark
+<sup><sup>[↩ Parent](#theodolitecomv1 )</sup></sup>
+
+
+
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+      <td><b>apiVersion</b></td>
+      <td>string</td>
+      <td>theodolite.com/v1</td>
+      <td>true</td>
+      </tr>
+      <tr>
+      <td><b>kind</b></td>
+      <td>string</td>
+      <td>benchmark</td>
+      <td>true</td>
+      </tr>
+      <tr>
+      <td><b><a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta">metadata</a></b></td>
+      <td>object</td>
+      <td>Refer to the Kubernetes API documentation for the fields of the `metadata` field.</td>
+      <td>true</td>
+      </tr><tr>
+        <td><b><a href="#benchmarkstatus">status</a></b></td>
+        <td>object</td>
+        <td>
+          <br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b><a href="#benchmarkspec">spec</a></b></td>
+        <td>object</td>
+        <td>
+          <br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.status
+<sup><sup>[↩ Parent](#benchmark)</sup></sup>
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>resourceSetsState</b></td>
+        <td>string</td>
+        <td>
+          The status of a Benchmark indicates whether all resources are available to start the benchmark or not.<br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec
+<sup><sup>[↩ Parent](#benchmark)</sup></sup>
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b><a href="#benchmarkspecinfrastructure">infrastructure</a></b></td>
+        <td>object</td>
+        <td>
+          (Optional) A list of file names that reference Kubernetes resources that are deployed on the cluster to create the required infrastructure.<br/>
+          <br/>
+            <i>Default</i>: map[]<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>name</b></td>
+        <td>string</td>
+        <td>
+          This field exists only for technical reasons and should not be set by the user. The value of the field will be overwritten.<br/>
+          <br/>
+            <i>Default</i>: <br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b><a href="#benchmarkspeckafkaconfig">kafkaConfig</a></b></td>
+        <td>object</td>
+        <td>
+          Contains the Kafka configuration.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b><a href="#benchmarkspecloadgenerator">loadGenerator</a></b></td>
+        <td>object</td>
+        <td>
+          The loadGenResourceSets field specifies all Kubernetes resources required to start the load generator. A resourceSet can be either a configMap resourceSet or a fileSystem resourceSet.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b><a href="#benchmarkspecloadtypesindex">loadTypes</a></b></td>
+        <td>[]object</td>
+        <td>
+          A list of load types that can be scaled for this benchmark. For each load type the concrete values are defined in the execution object.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b><a href="#benchmarkspecresourcetypesindex">resourceTypes</a></b></td>
+        <td>[]object</td>
+        <td>
+          A list of resource types that can be scaled for this `benchmark` resource. For each resource type the concrete values are defined in the `execution` object.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b><a href="#benchmarkspecsut">sut</a></b></td>
+        <td>object</td>
+        <td>
+          The appResourceSets field specifies all Kubernetes resources required to start the SUT (system under test). A resourceSet can be either a configMap resourceSet or a fileSystem resourceSet.<br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.infrastructure
+<sup><sup>[↩ Parent](#benchmarkspec)</sup></sup>
+
+
+
+(Optional) A list of file names that reference Kubernetes resources that are deployed on the cluster to create the required infrastructure.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b><a href="#benchmarkspecinfrastructureresourcesindex">resources</a></b></td>
+        <td>[]object</td>
+        <td>
+          <br/>
+          <br/>
+            <i>Default</i>: []<br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.infrastructure.resources[index]
+<sup><sup>[↩ Parent](#benchmarkspecinfrastructure)</sup></sup>
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b><a href="#benchmarkspecinfrastructureresourcesindexconfigmap">configMap</a></b></td>
+        <td>object</td>
+        <td>
+          The configMap resourceSet loads the Kubernetes manifests from a Kubernetes configMap.<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b><a href="#benchmarkspecinfrastructureresourcesindexfilesystem">fileSystem</a></b></td>
+        <td>object</td>
+        <td>
+          The fileSystem resourceSet loads the Kubernetes manifests from the filesystem.<br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.infrastructure.resources[index].configMap
+<sup><sup>[↩ Parent](#benchmarkspecinfrastructureresourcesindex)</sup></sup>
+
+
+
+The configMap resourceSet loads the Kubernetes manifests from a Kubernetes configMap.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>files</b></td>
+        <td>[]string</td>
+        <td>
+          (Optional) Specifies which files from the configMap should be loaded. If this field is not set, all files are loaded.<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>name</b></td>
+        <td>string</td>
+        <td>
+          The name of the configMap<br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.infrastructure.resources[index].fileSystem
+<sup><sup>[↩ Parent](#benchmarkspecinfrastructureresourcesindex)</sup></sup>
+
+
+
+The fileSystem resourceSet loads the Kubernetes manifests from the filesystem.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>files</b></td>
+        <td>[]string</td>
+        <td>
+          (Optional) Specifies which files from the path should be loaded. If this field is not set, all files are loaded.<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>path</b></td>
+        <td>string</td>
+        <td>
+          The path to the folder which contains the Kubernetes manifest files.<br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.kafkaConfig
+<sup><sup>[↩ Parent](#benchmarkspec)</sup></sup>
+
+
+
+Contains the Kafka configuration.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>bootstrapServer</b></td>
+        <td>string</td>
+        <td>
+          The bootstrap servers connection string.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b><a href="#benchmarkspeckafkaconfigtopicsindex">topics</a></b></td>
+        <td>[]object</td>
+        <td>
+          List of topics to be created for each experiment. Alternatively, Theodolite offers the possibility to remove certain topics after each experiment.<br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.kafkaConfig.topics[index]
+<sup><sup>[↩ Parent](#benchmarkspeckafkaconfig)</sup></sup>
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>numPartitions</b></td>
+        <td>integer</td>
+        <td>
+          The number of partitions of the topic.<br/>
+          <br/>
+            <i>Default</i>: 0<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>removeOnly</b></td>
+        <td>boolean</td>
+        <td>
+          Determines if this topic should only be deleted after each experiment. For removeOnly topics, the name can be a RegEx describing the topic.<br/>
+          <br/>
+            <i>Default</i>: false<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>replicationFactor</b></td>
+        <td>integer</td>
+        <td>
+          The replication factor of the topic.<br/>
+          <br/>
+            <i>Default</i>: 0<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>name</b></td>
+        <td>string</td>
+        <td>
+          The name of the topic.<br/>
+          <br/>
+            <i>Default</i>: <br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.loadGenerator
+<sup><sup>[↩ Parent](#benchmarkspec)</sup></sup>
+
+
+
+The loadGenResourceSets field specifies all Kubernetes resources required to start the load generator. A resourceSet can be either a configMap resourceSet or a fileSystem resourceSet.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b><a href="#benchmarkspecloadgeneratorresourcesindex">resources</a></b></td>
+        <td>[]object</td>
+        <td>
+          <br/>
+          <br/>
+            <i>Default</i>: []<br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.loadGenerator.resources[index]
+<sup><sup>[↩ Parent](#benchmarkspecloadgenerator)</sup></sup>
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b><a href="#benchmarkspecloadgeneratorresourcesindexconfigmap">configMap</a></b></td>
+        <td>object</td>
+        <td>
+          The configMap resourceSet loads the Kubernetes manifests from a Kubernetes configMap.<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b><a href="#benchmarkspecloadgeneratorresourcesindexfilesystem">fileSystem</a></b></td>
+        <td>object</td>
+        <td>
+          The fileSystem resourceSet loads the Kubernetes manifests from the filesystem.<br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.loadGenerator.resources[index].configMap
+<sup><sup>[↩ Parent](#benchmarkspecloadgeneratorresourcesindex)</sup></sup>
+
+
+
+The configMap resourceSet loads the Kubernetes manifests from a Kubernetes configMap.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>files</b></td>
+        <td>[]string</td>
+        <td>
+          (Optional) Specifies which files from the configMap should be loaded. If this field is not set, all files are loaded.<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>name</b></td>
+        <td>string</td>
+        <td>
+          The name of the configMap<br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.loadGenerator.resources[index].fileSystem
+<sup><sup>[↩ Parent](#benchmarkspecloadgeneratorresourcesindex)</sup></sup>
+
+
+
+The fileSystem resourceSet loads the Kubernetes manifests from the filesystem.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>files</b></td>
+        <td>[]string</td>
+        <td>
+          (Optional) Specifies which files from the path should be loaded. If this field is not set, all files are loaded.<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>path</b></td>
+        <td>string</td>
+        <td>
+          The path to the folder which contains the Kubernetes manifest files.<br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.loadTypes[index]
+<sup><sup>[↩ Parent](#benchmarkspec)</sup></sup>
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b><a href="#benchmarkspecloadtypesindexpatchersindex">patchers</a></b></td>
+        <td>[]object</td>
+        <td>
+          List of patchers used to scale this resource type.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b>typeName</b></td>
+        <td>string</td>
+        <td>
+          Name of the load type.<br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.loadTypes[index].patchers[index]
+<sup><sup>[↩ Parent](#benchmarkspecloadtypesindex)</sup></sup>
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>properties</b></td>
+        <td>map[string]string</td>
+        <td>
+          (Optional) Patcher specific additional arguments.<br/>
+          <br/>
+            <i>Default</i>: map[]<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>resource</b></td>
+        <td>string</td>
+        <td>
+          Specifies the Kubernetes resource to be patched.<br/>
+          <br/>
+            <i>Default</i>: <br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b>type</b></td>
+        <td>string</td>
+        <td>
+          Type of the Patcher.<br/>
+          <br/>
+            <i>Default</i>: <br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
+
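+As a sketch, a load type scaled by such patchers could be declared as follows; the patcher names are taken from the patcher documentation, while the resource and property values are placeholders:
+
+```yaml
+loadTypes:
+  - typeName: NumSensors
+    patchers:
+      - type: NumSensorsLoadGeneratorReplicaPatcher
+        resource: uc1-load-generator-deployment.yaml
+      - type: EnvVarPatcher
+        resource: uc1-load-generator-deployment.yaml
+        properties:
+          container: workload-generator
+          variableName: NUM_SENSORS
+```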
+
+### benchmark.spec.resourceTypes[index]
+<sup><sup>[↩ Parent](#benchmarkspec)</sup></sup>
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b><a href="#benchmarkspecresourcetypesindexpatchersindex">patchers</a></b></td>
+        <td>[]object</td>
+        <td>
+          List of patchers used to scale this resource type.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b>typeName</b></td>
+        <td>string</td>
+        <td>
+          Name of the resource type.<br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.resourceTypes[index].patchers[index]
+<sup><sup>[↩ Parent](#benchmarkspecresourcetypesindex)</sup></sup>
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>properties</b></td>
+        <td>map[string]string</td>
+        <td>
+          (Optional) Patcher specific additional arguments.<br/>
+          <br/>
+            <i>Default</i>: map[]<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>resource</b></td>
+        <td>string</td>
+        <td>
+          Specifies the Kubernetes resource to be patched.<br/>
+          <br/>
+            <i>Default</i>: <br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b>type</b></td>
+        <td>string</td>
+        <td>
+          Type of the patcher.<br/>
+          <br/>
+            <i>Default</i>: <br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.sut
+<sup><sup>[↩ Parent](#benchmarkspec)</sup></sup>
+
+
+
+The resourceSets specify all Kubernetes resources required to start the SUT. A resourceSet can be either a configMap resourceSet or a fileSystem resourceSet.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b><a href="#benchmarkspecsutresourcesindex">resources</a></b></td>
+        <td>[]object</td>
+        <td>
+          <br/>
+          <br/>
+            <i>Default</i>: []<br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.sut.resources[index]
+<sup><sup>[↩ Parent](#benchmarkspecsut)</sup></sup>
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b><a href="#benchmarkspecsutresourcesindexconfigmap">configMap</a></b></td>
+        <td>object</td>
+        <td>
+          The configMap resourceSet loads the Kubernetes manifests from a Kubernetes configMap.<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b><a href="#benchmarkspecsutresourcesindexfilesystem">fileSystem</a></b></td>
+        <td>object</td>
+        <td>
+          The fileSystem resourceSet loads the Kubernetes manifests from the filesystem.<br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.sut.resources[index].configMap
+<sup><sup>[↩ Parent](#benchmarkspecsutresourcesindex)</sup></sup>
+
+
+
+The configMap resourceSet loads the Kubernetes manifests from a Kubernetes configMap.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>files</b></td>
+        <td>[]string</td>
+        <td>
+          (Optional) Specifies which files from the configMap should be loaded. If this field is not set, all files are loaded.<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>name</b></td>
+        <td>string</td>
+        <td>
+          The name of the configMap.<br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+
+### benchmark.spec.sut.resources[index].fileSystem
+<sup><sup>[↩ Parent](#benchmarkspecsutresourcesindex)</sup></sup>
+
+
+
+The fileSystem resourceSet loads the Kubernetes manifests from the filesystem.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>files</b></td>
+        <td>[]string</td>
+        <td>
+          (Optional) Specifies which files from the file system should be loaded. If this field is not set, all files are loaded.<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>path</b></td>
+        <td>string</td>
+        <td>
+          The path to the folder which contains the Kubernetes manifest files.<br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+## execution
+<sup><sup>[↩ Parent](#theodolitecomv1)</sup></sup>
+
+
+
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+      <td><b>apiVersion</b></td>
+      <td>string</td>
+      <td>theodolite.com/v1</td>
+      <td>true</td>
+      </tr>
+      <tr>
+      <td><b>kind</b></td>
+      <td>string</td>
+      <td>execution</td>
+      <td>true</td>
+      </tr>
+      <tr>
+      <td><b><a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#objectmeta-v1-meta">metadata</a></b></td>
+      <td>object</td>
+      <td>Refer to the Kubernetes API documentation for the fields of the `metadata` field.</td>
+      <td>true</td>
+      </tr><tr>
+        <td><b><a href="#executionstatus">status</a></b></td>
+        <td>object</td>
+        <td>
+          <br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b><a href="#executionspec">spec</a></b></td>
+        <td>object</td>
+        <td>
+          <br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
+
+
+### execution.status
+<sup><sup>[↩ Parent](#execution)</sup></sup>
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>executionDuration</b></td>
+        <td>string</td>
+        <td>
+          Duration of the execution in seconds.<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>executionState</b></td>
+        <td>string</td>
+        <td>
+          <br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+
+### execution.spec
+<sup><sup>[↩ Parent](#execution)</sup></sup>
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>name</b></td>
+        <td>string</td>
+        <td>
+          This field exists only for technical reasons and should not be set by the user. The value of the field will be overwritten.<br/>
+          <br/>
+            <i>Default</i>: <br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>benchmark</b></td>
+        <td>string</td>
+        <td>
+          The name of the benchmark this execution is referring to.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b><a href="#executionspecconfigoverridesindex">configOverrides</a></b></td>
+        <td>[]object</td>
+        <td>
+          List of patchers that are used to override existing configurations.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b><a href="#executionspecexecution">execution</a></b></td>
+        <td>object</td>
+        <td>
+          Defines the overall parameters for the execution.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b><a href="#executionspecload">load</a></b></td>
+        <td>object</td>
+        <td>
+          Specifies the load values that are benchmarked.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b><a href="#executionspecresources">resources</a></b></td>
+        <td>object</td>
+        <td>
+          Specifies the scaling resource that is benchmarked.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b><a href="#executionspecslosindex">slos</a></b></td>
+        <td>[]object</td>
+        <td>
+          List of SLOs used to evaluate the experiments of this execution.<br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
+
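+For illustration, a minimal (hypothetical) `spec` combining these fields could look like this (the sub-fields are detailed in the sections below); the benchmark name, URL and all values are placeholders:
+
+```yaml
+spec:
+  benchmark: uc1-kstreams
+  load:
+    loadType: NumSensors
+    loadValues: [25000, 50000]
+  resources:
+    resourceType: Instances
+    resourceValues: [1, 2, 4]
+  slos:
+    - sloType: lag trend
+      prometheusUrl: "http://prometheus-operated:9090"
+      offset: 0
+      properties:
+        threshold: "2000"
+  execution:
+    strategy: LinearSearch
+    duration: 300
+    repetitions: 1
+    restrictions: []   # optionally a domain restriction strategy
+  configOverrides: []
+```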
+
+### execution.spec.configOverrides[index]
+<sup><sup>[↩ Parent](#executionspec)</sup></sup>
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b><a href="#executionspecconfigoverridesindexpatcher">patcher</a></b></td>
+        <td>object</td>
+        <td>
+          Patcher used to patch a resource.<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>value</b></td>
+        <td>string</td>
+        <td>
+          <br/>
+        </td>
+        <td>false</td>
+      </tr></tbody>
+</table>
+
+
+### execution.spec.configOverrides[index].patcher
+<sup><sup>[↩ Parent](#executionspecconfigoverridesindex)</sup></sup>
+
+
+
+Patcher used to patch a resource.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>properties</b></td>
+        <td>map[string]string</td>
+        <td>
+          (Optional) Patcher specific additional arguments.<br/>
+          <br/>
+            <i>Default</i>: map[]<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>resource</b></td>
+        <td>string</td>
+        <td>
+          Specifies the Kubernetes resource to be patched.<br/>
+          <br/>
+            <i>Default</i>: <br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b>type</b></td>
+        <td>string</td>
+        <td>
+          Type of the Patcher.<br/>
+          <br/>
+            <i>Default</i>: <br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
+
+
+### execution.spec.execution
+<sup><sup>[↩ Parent](#executionspec)</sup></sup>
+
+
+
+Defines the overall parameters for the execution.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>loadGenerationDelay</b></td>
+        <td>integer</td>
+        <td>
+          Seconds to wait between the start of the SUT and the load generator.<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>duration</b></td>
+        <td>integer</td>
+        <td>
+          Defines the duration of each experiment in seconds.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b>repetitions</b></td>
+        <td>integer</td>
+        <td>
+          Number of repetitions for each experiment.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b>restrictions</b></td>
+        <td>[]string</td>
+        <td>
+          List of restriction strategies used to delimit the search space.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b>strategy</b></td>
+        <td>string</td>
+        <td>
+          Defines the search strategy used for the execution, either 'LinearSearch' or 'BinarySearch'.<br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
+
+
+### execution.spec.load
+<sup><sup>[↩ Parent](#executionspec)</sup></sup>
+
+
+
+Specifies the load values that are benchmarked.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>loadType</b></td>
+        <td>string</td>
+        <td>
+          The type of the load. It must match one of the load types specified in the referenced benchmark.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b>loadValues</b></td>
+        <td>[]integer</td>
+        <td>
+          List of load values for the specified load type.<br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
+
+
+### execution.spec.resources
+<sup><sup>[↩ Parent](#executionspec)</sup></sup>
+
+
+
+Specifies the scaling resource that is benchmarked.
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>resourceType</b></td>
+        <td>string</td>
+        <td>
+          The type of the resource. It must match one of the resource types specified in the referenced benchmark.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b>resourceValues</b></td>
+        <td>[]integer</td>
+        <td>
+          List of resource values for the specified resource type.<br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
+
+
+### execution.spec.slos[index]
+<sup><sup>[↩ Parent](#executionspec)</sup></sup>
+
+
+
+
+
+<table>
+    <thead>
+        <tr>
+            <th>Name</th>
+            <th>Type</th>
+            <th>Description</th>
+            <th>Required</th>
+        </tr>
+    </thead>
+    <tbody><tr>
+        <td><b>properties</b></td>
+        <td>map[string]string</td>
+        <td>
+          (Optional) SLO specific additional arguments.<br/>
+          <br/>
+            <i>Default</i>: map[]<br/>
+        </td>
+        <td>false</td>
+      </tr><tr>
+        <td><b>offset</b></td>
+        <td>integer</td>
+        <td>
+          Hours by which the start and end timestamp will be shifted (for different timezones).<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b>prometheusUrl</b></td>
+        <td>string</td>
+        <td>
+          Connection string for Prometheus.<br/>
+        </td>
+        <td>true</td>
+      </tr><tr>
+        <td><b>sloType</b></td>
+        <td>string</td>
+        <td>
+          The type of the SLO. It must match 'lag trend'.<br/>
+        </td>
+        <td>true</td>
+      </tr></tbody>
+</table>
\ No newline at end of file
diff --git a/docs/index.yaml b/docs/index.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..54580ea45f1c678443dae96c7139f53fdac37f60
--- /dev/null
+++ b/docs/index.yaml
@@ -0,0 +1,109 @@
+apiVersion: v1
+entries:
+  theodolite:
+  - apiVersion: v2
+    appVersion: 0.5.1
+    created: "2021-11-12T16:15:01.629937292+01:00"
+    dependencies:
+    - condition: grafana.enabled
+      name: grafana
+      repository: https://grafana.github.io/helm-charts
+      version: 6.17.5
+    - condition: kube-prometheus-stack.enabled
+      name: kube-prometheus-stack
+      repository: https://prometheus-community.github.io/helm-charts
+      version: 12.0.0
+    - condition: cp-helm-charts.enabled
+      name: cp-helm-charts
+      repository: https://soerenhenning.github.io/cp-helm-charts
+      version: 0.6.0
+    - condition: kafka-lag-exporter.enabled
+      name: kafka-lag-exporter
+      repository: https://lightbend.github.io/kafka-lag-exporter/repo/
+      version: 0.6.6
+    description: Theodolite is a framework for benchmarking the scalability of
+      stream processing engines.
+    digest: a67374c4cb2b0e8b2d711468364c6b4a486a910bd1c667dbf3c5614e36e0680c
+    home: https://cau-se.github.io/theodolite
+    maintainers:
+    - email: soeren.henning@email.uni-kiel.de
+      name: Sören Henning
+      url: https://www.se.informatik.uni-kiel.de/en/team/soeren-henning-m-sc
+    name: theodolite
+    sources:
+    - https://github.com/cau-se/theodolite
+    type: application
+    urls:
+    - https://github.com/cau-se/theodolite/releases/download/v0.5.1/theodolite-0.5.1.tgz
+    version: 0.5.1
+  - apiVersion: v2
+    appVersion: 0.5.0
+    created: "2021-11-04T17:45:14.153231798+01:00"
+    dependencies:
+    - condition: grafana.enabled
+      name: grafana
+      repository: https://grafana.github.io/helm-charts
+      version: 6.0.0
+    - condition: kube-prometheus-stack.enabled
+      name: kube-prometheus-stack
+      repository: https://prometheus-community.github.io/helm-charts
+      version: 12.0.0
+    - condition: cp-helm-charts.enabled
+      name: cp-helm-charts
+      repository: https://soerenhenning.github.io/cp-helm-charts
+      version: 0.6.0
+    - condition: kafka-lag-exporter.enabled
+      name: kafka-lag-exporter
+      repository: https://lightbend.github.io/kafka-lag-exporter/repo/
+      version: 0.6.6
+    description: Theodolite is a framework for benchmarking the scalability of
+      stream processing engines.
+    digest: 8a4f218e44341eb8fb09ddc58c6aaa0a14aded685f3423088c21fe0ffc112281
+    home: https://cau-se.github.io/theodolite
+    maintainers:
+    - email: soeren.henning@email.uni-kiel.de
+      name: Sören Henning
+      url: https://www.se.informatik.uni-kiel.de/en/team/soeren-henning-m-sc
+    name: theodolite
+    sources:
+    - https://github.com/cau-se/theodolite
+    type: application
+    urls:
+    - https://github.com/cau-se/theodolite/releases/download/v0.5.0/theodolite-0.5.0.tgz
+    version: 0.5.0
+  - apiVersion: v2
+    appVersion: 0.4.0
+    created: "2021-03-18T15:50:50.930902088+01:00"
+    dependencies:
+    - condition: grafana.enabled
+      name: grafana
+      repository: https://grafana.github.io/helm-charts
+      version: 6.0.0
+    - condition: kube-prometheus-stack.enabled
+      name: kube-prometheus-stack
+      repository: https://prometheus-community.github.io/helm-charts
+      version: 12.0.0
+    - condition: cp-helm-charts.enabled
+      name: cp-helm-charts
+      repository: https://soerenhenning.github.io/cp-helm-charts
+      version: 0.6.0
+    - condition: kafka-lag-exporter.enabled
+      name: kafka-lag-exporter
+      repository: https://lightbend.github.io/kafka-lag-exporter/repo/
+      version: 0.6.6
+    description: Theodolite is a framework for benchmarking the scalability of
+      stream processing engines.
+    digest: 45975b61b79547b152241cfc6dcf5e640090ff2c08ff9120275c77c9d9054155
+    home: https://cau-se.github.io/theodolite
+    maintainers:
+    - email: soeren.henning@email.uni-kiel.de
+      name: Sören Henning
+      url: https://www.se.informatik.uni-kiel.de/en/team/soeren-henning-m-sc
+    name: theodolite
+    sources:
+    - https://github.com/cau-se/theodolite
+    type: application
+    urls:
+    - https://github.com/cau-se/theodolite/releases/download/v0.4.0/theodolite-0.4.0.tgz
+    version: 0.4.0
+generated: "2021-11-12T16:15:01.591258889+01:00"
diff --git a/docs/patchers.md b/docs/patchers.md
new file mode 100644
index 0000000000000000000000000000000000000000..572f107fb38ba295cd013abeff5dd53c2702527b
--- /dev/null
+++ b/docs/patchers.md
@@ -0,0 +1,50 @@
+## Patchers
+
+* **ReplicaPatcher**: Allows modifying the number of replicas of a Kubernetes Deployment (see the example configuration at the end of this list).
+  * **type**: "ReplicaPatcher"
+  * **resource**: "uc1-kstreams-deployment.yaml"
+
+* **NumSensorsLoadGeneratorReplicaPatcher**: Allows scaling the number of load generators. Scales according to the following formula: (value + 15_000 - 1) / 15_000
+  * **type**: "NumSensorsLoadGeneratorReplicaPatcher"
+  * **resource**: "uc1-load-generator-deployment.yaml"
+
+* **NumNestedGroupsLoadGeneratorReplicaPatcher**: Allows scaling the number of load generators. Scales according to the following formula: (4^value + 15_000 - 1) / 15_000
+  * **type**: "NumNestedGroupsLoadGeneratorReplicaPatcher"
+  * **resource**: "uc1-load-generator-deployment.yaml"
+
+* **EnvVarPatcher**: Allows modifying the value of an environment variable for a container in a Kubernetes Deployment.
+  * **type**: "EnvVarPatcher"
+  * **resource**: "uc1-load-generator-deployment.yaml"
+  * **properties**:
+    * container: "workload-generator"
+    * variableName: "NUM_SENSORS"
+
+* **NodeSelectorPatcher**: Changes the node selection field in Kubernetes resources.
+  * **type**: "NodeSelectorPatcher"
+  * **resource**: "uc1-load-generator-deployment.yaml"
+  * **properties**:
+    * variableName: "env"
+  * **value**: "prod"
+
+* **ResourceLimitPatcher**: Changes the resource limit for a Kubernetes resource.
+  * **type**: "ResourceLimitPatcher"
+  * **resource**: "uc1-kstreams-deployment.yaml"
+  * **properties**:
+    * container: "uc-application"
+    * variableName: "cpu" or "memory"
+  * **value**: "1000m" or "2Gi"
+
+* **SchedulerNamePatcher**: Changes the scheduler for Kubernetes resources.
+  * **type**: "SchedulerNamePatcher"
+  * **resource**: "uc1-kstreams-deployment.yaml"
+  * **value**: "random-scheduler"
+
+* **ImagePatcher**: Changes the image of a Kubernetes resource. Currently not fully implemented.
+  * **type**: "ImagePatcher"
+  * **resource**: "uc1-kstreams-deployment.yaml"
+  * **properties**:
+    * container: "uc-application"
+  * **value**: "dockerhubrepo/imagename"
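+
+Patchers are referenced either from a benchmark's `resourceTypes`/`loadTypes`, where Theodolite supplies the value, or from an execution's `configOverrides`, where the value is given explicitly. A hypothetical override using the NodeSelectorPatcher from above:
+
+```yaml
+configOverrides:
+  - patcher:
+      type: "NodeSelectorPatcher"
+      resource: "uc1-load-generator-deployment.yaml"
+      properties:
+        variableName: "env"
+    value: "prod"
+```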
diff --git a/docs/release-process.md b/docs/release-process.md
index 961106247fd0967a2dd6ffdd980e35235ceed168..103d8d1ac65472459bcaad648f921240eaf508c8 100644
--- a/docs/release-process.md
+++ b/docs/release-process.md
@@ -11,20 +11,45 @@ This document describes how to perform a new Theodolite release.
 We assume that we are creating the release `v0.3.1`. Please make sure to adjust
 the following steps according to the release, you are actually performing.
 
-1. Update `codemeta.json` to match the new version. In particular, make sure that `version` points to the version you are releasing and `dateModified` points to the date you are relasing this version. [CodeMeata generator](https://codemeta.github.io/codemeta-generator/) may help you in updating the file.
+1. Create a new branch `v0.3` if it does not already exist. This branch will never
+again be merged into master.
 
-2. Update `CITATION.cff` to match the new version. At least update the `version` field.
+2. Checkout the `v0.3` branch.
 
-3. Create a new branch `v0.3` if it does not already exists. This branch will never
-again be merged into master.
+3. Update all references to artifacts which are versioned. This includes:
+
+    1. Update all references to Theodolite Docker images to tag `v0.3.1`. These are:
+        1. the default `helm/values.yaml` file,
+        2. the example `execution/theodolite.yaml` job,
+        3. the Kubernetes benchmark resources in `theodolite-benchmarks/definitions/**/resources` and
+        4. the Docker Compose files in `theodolite-benchmarks/docker-test`.
+
+    2. Update both the `version` and the `appVersion` fields in the Helm `Chart.yaml` file to `0.3.1`.
+
+    3. Update `codemeta.json` to match the new version. In particular, make sure that `version` points to the version you are releasing and `dateModified` points to the date you are releasing this version. The [CodeMeta generator](https://codemeta.github.io/codemeta-generator/) may help you in updating the file.
+
+    4. Update `CITATION.cff` to match the new version. At least update the `version` field.
+
+4. Create a Helm package by running `./build-package.sh` from the chart directory.
+
+5. Update the Helm repository index located at `/docs` by running `./update-index.sh v0.3.1`.
+
+6. Commit these changes to the `v0.3` branch.
+
+7. Tag this commit with `v0.3.1` (can be done via GitLab). The corresponding Docker images will be uploaded.
+
+8. Create *releases* on GitLab and GitHub. Upload the generated Helm package to these releases.
+
+9. Switch to the `master` branch.
+
+10. Re-run `./update-index.sh v0.3.1` to include the latest release in the *upstream* Helm repository. You can now delete the packaged Helm chart.
 
-4. Checkout the `v0.3` branch.
+11. If this release increments Theodolite's *latest* version number:
 
-5. Update all references to Theodolite Docker images to tag `v0.3.1`. These are the Kubernetes resource definitions in
-`execution`, the references to *latest* in `run_uc.py`, the Docker Compose files in `docker-test` and the example `theodolite.yaml` job.
+    1. Update the Helm `Chart.yaml` file to `0.4.0-SNAPSHOT` (see Step 3).
 
-6. Commit these changes.
+    2. Update the `codemeta.json` file according to Step 3.
 
-7. Tag this commit with `v0.3.1`. The corresponding Docker images will be uploaded.
+    3. Update the `CITATION.cff` file according to Step 3.
 
-8. Create *releases* for this tag in both, GitLab and GitHub.
+12. Commit these changes to the `master` branch.
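+
+For orientation, Steps 4, 5 and 10 boil down to commands like the following; the script locations are assumptions based on the steps above:
+
+```sh
+./build-package.sh        # Step 4: run from the chart directory
+./update-index.sh v0.3.1  # Steps 5 and 10: updates the Helm repository index in docs/
+```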
diff --git a/execution/.dockerignore b/execution/.dockerignore
deleted file mode 100644
index 68e5f21c503a80d7db64722d700351a303ddb9dd..0000000000000000000000000000000000000000
--- a/execution/.dockerignore
+++ /dev/null
@@ -1,9 +0,0 @@
-*
-!requirements.txt
-!uc-workload-generator
-!uc-application
-!strategies
-!lib
-!theodolite.py
-!run_uc.py
-!lag_analysis.py
diff --git a/execution/.gitignore b/execution/.gitignore
deleted file mode 100644
index bac9a5d1eeb12d9e40d38376904e8fb69c0e5231..0000000000000000000000000000000000000000
--- a/execution/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-exp_counter.txt
-results
diff --git a/execution/Dockerfile b/execution/Dockerfile
deleted file mode 100644
index e71bc91d9d31bea4c1598292e43d0ab7c193c3fa..0000000000000000000000000000000000000000
--- a/execution/Dockerfile
+++ /dev/null
@@ -1,15 +0,0 @@
-FROM python:3.8
-
-RUN mkdir /app
-WORKDIR /app
-ADD requirements.txt /app/
-RUN pip install -r requirements.txt
-COPY uc-workload-generator /app/uc-workload-generator
-COPY uc-application /app/uc-application
-COPY strategies /app/strategies
-COPY lib /app/lib
-COPY lag_analysis.py /app/
-COPY run_uc.py /app/
-COPY theodolite.py /app/
-
-CMD ["python", "/app/theodolite.py"]
diff --git a/execution/README.md b/execution/README.md
index 442f1c71929f9c7367909ce6609c9122faf3e814..c12eff782ffc129bb4b1820ce4a1e3076ab4f8ad 100644
--- a/execution/README.md
+++ b/execution/README.md
@@ -1,5 +1,7 @@
 # Theodolite Execution Framework
 
+**Please note: Most of the content in this directory is deprecated. Please refer to the `helm` directory for installing the latest version of Theodolite.**
+
 This directory contains the Theodolite framework for executing scalability
 benchmarks in a Kubernetes cluster. As Theodolite aims for executing benchmarks
 in realistic execution environments, some third-party components are [required](#installation).
@@ -96,7 +98,7 @@ kubectl apply -f infrastructure/kafka/service-monitor.yaml
 Other Kafka deployments, for example, using Strimzi, should work in a similar way.
 
 *Please note that currently, even if installed differently, the corresponding services must run at
-*my-confluent-cp-kafka:9092*, *my-confluent-cp-zookeeper:2181* and *my-confluent-cp-schema-registry:8081*.
+`my-confluent-cp-kafka:9092`, `my-confluent-cp-zookeeper:2181` and `my-confluent-cp-schema-registry:8081`.*
 
 #### A Kafka Client Pod
 
@@ -223,7 +225,17 @@ Theodolite locally on your machine see the description below.
 see the [Configuration](#configuration) section below. Note, that you might uncomment the `serviceAccountName` line if
 RBAC is enabled on your cluster (see installation of [Theodolite RBAC](#Theodolite-RBAC)).
 
-To start the execution of a benchmark run (with `<your-theodolite-yaml>` being your job definition):
+To start the execution of a benchmark, create a ConfigMap that contains all required Kubernetes resource files for the SUT and the load generator, a ConfigMap for the execution, and a ConfigMap for the benchmark.
+
+```sh
+kubectl create configmap app-resources-configmap --from-file=<folder-with-all-required-k8s-resources>
+kubectl create configmap execution-configmap --from-file=<execution.yaml>
+kubectl create configmap benchmark-configmap --from-file=<benchmark.yaml>
+```
+
+This will create three ConfigMaps. You can verify this via `kubectl get configmaps`.
+
+Start the Theodolite job (with `<your-theodolite-yaml>` being your job definition):
 
 ```sh
 kubectl create -f <your-theodolite-yaml>
@@ -239,24 +251,7 @@ Kubernetes volume.
 
 ### Configuration
 
-| Command line         | Kubernetes          | Description                                                  |
-| -------------------- | ------------------- | ------------------------------------------------------------ |
-| --uc                 | UC                  | **[Mandatory]** Stream processing use case to be benchmarked. Has to be one of `1`, `2`, `3` or `4`. |
-| --loads              | LOADS               | **[Mandatory]** Values for the workload generator to be tested, should be sorted in ascending order. |
-| --instances          | INSTANCES           | **[Mandatory]** Numbers of instances to be benchmarked, should be sorted in ascending order. |
-| --duration           | DURATION            | Duration in minutes subexperiments should be executed for. *Default:* `5`. |
-| --partitions         | PARTITIONS          | Number of partitions for Kafka topics. *Default:* `40`.      |
-| --cpu-limit          | CPU_LIMIT           | Kubernetes CPU limit for a single Pod.  *Default:* `1000m`.  |
-| --memory-limit       | MEMORY_LIMIT        | Kubernetes memory limit for a single Pod. *Default:* `4Gi`.  |
-| --domain-restriction | DOMAIN_RESTRICTION  | A flag that indiciates domain restriction should be used. *Default:* not set. For more details see Section [Domain Restriction](#domain-restriction). |
-| --search-strategy    | SEARCH_STRATEGY     | The benchmarking search strategy. Can be set to `check-all`, `linear-search` or `binary-search`. *Default:* `check-all`. For more details see Section [Benchmarking Search Strategies](#benchmarking-search-strategies). |
-| --reset              | RESET               | Resets the environment before each subexperiment. Useful if execution was aborted and just one experiment should be executed. |
-| --reset-only         | RESET_ONLY          | Only resets the environment. Ignores all other parameters. Useful if execution was aborted and one want a clean state for new executions. |
-| --namespace          | NAMESPACE        | Kubernetes namespace. *Default:* `default`.  |
-| --prometheus         | PROMETHEUS_BASE_URL | Defines where to find the prometheus instance. *Default:* `http://localhost:9090` |
-| --path               | RESULT_PATH         | A directory path for the results. Relative to the Execution folder. *Default:* `results` |
-| --configurations     | CONFIGURATIONS      | Defines environment variables for the use cases and, thus, enables further configuration options. |
-| --threshold          | THRESHOLD           | The threshold for the trend slop that the search strategies use to determine that a load could be handled. *Default:* `2000` |
+Make sure that the names of the ConfigMaps correspond to the `configMaps`, `volumes` and `mountPath` specifications in the job definition. In particular, the names of the execution file and the benchmark file must match the values of the corresponding environment variables.
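+
+A (hypothetical) excerpt of such a job definition; all volume and ConfigMap names are placeholders and must match your setup:
+
+```yaml
+volumes:
+  - name: benchmark-resources
+    configMap:
+      name: app-resources-configmap   # created above
+containers:
+  - name: theodolite
+    volumeMounts:
+      - name: benchmark-resources
+        mountPath: /deployments/benchmark-resources
+```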
 
 ### Domain Restriction
 
diff --git a/execution/infrastructure/grafana/dashboard-config-map.yaml b/execution/infrastructure/grafana/dashboard-config-map.yaml
index e858ffe7dfdd4fbcdf1592b0f564c305969f6af5..c9a328d3195a7c0cc26527df190e29f82b7a628a 100644
--- a/execution/infrastructure/grafana/dashboard-config-map.yaml
+++ b/execution/infrastructure/grafana/dashboard-config-map.yaml
@@ -252,7 +252,7 @@ data:
         "steppedLine": false,
         "targets": [
           {
-            "expr": "sum by(group, topic) (kafka_consumergroup_group_lag > 0)",
+            "expr": "sum by(group, topic) (kafka_consumergroup_group_lag >= 0)",
             "format": "time_series",
             "intervalFactor": 1,
             "legendFormat": "{{topic}}",
diff --git a/execution/infrastructure/kafka/values.yaml b/execution/infrastructure/kafka/values.yaml
index 9c708ca054bc017874522cebb4ad2157bdce85a7..15fd8a822a18521f247584d1becbd09c19c137d2 100644
--- a/execution/infrastructure/kafka/values.yaml
+++ b/execution/infrastructure/kafka/values.yaml
@@ -48,7 +48,7 @@ cp-kafka:
   #   cpu: 100m
   #   memory: 128Mi
   configurationOverrides:
-    #"offsets.topic.replication.factor": "3"
+    # offsets.topic.replication.factor: "3"
     "message.max.bytes": "134217728" # 128 MB
     "replica.fetch.max.bytes": "134217728" # 128 MB
     # "default.replication.factor": 3
diff --git a/execution/infrastructure/kafka/values_kafka_nodeport.yaml b/execution/infrastructure/kafka/values_kafka_nodeport.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cf1deb3a0eda97039ad4609a1f07fa54d4d5d1ea
--- /dev/null
+++ b/execution/infrastructure/kafka/values_kafka_nodeport.yaml
@@ -0,0 +1,97 @@
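+## Helm values for accessing Kafka from outside the Kubernetes cluster via NodePort.
+## Usage sketch (the release name is an assumption and must match your setup):
+##   helm install my-confluent <path-to-cp-helm-charts> -f values_kafka_nodeport.yaml
+##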
+## ------------------------------------------------------
+## Zookeeper
+## ------------------------------------------------------
+cp-zookeeper:
+  enabled: true
+  servers: 1
+  image: confluentinc/cp-zookeeper
+  imageTag: 5.4.0
+  ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+  ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  imagePullSecrets:
+  #  - name: "regcred"
+  heapOptions: "-Xms512M -Xmx512M"
+  persistence:
+    enabled: false
+  resources: {}
+  ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
+  ## and remove the curly braces after 'resources:'
+  #  limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  #  requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+## ------------------------------------------------------
+## Kafka
+## ------------------------------------------------------
+cp-kafka:
+  enabled: true
+  brokers: 1
+  image: confluentinc/cp-enterprise-kafka
+  imageTag: 5.4.0
+  ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+  ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  imagePullSecrets:
+  #  - name: "regcred"
+  heapOptions: "-Xms512M -Xmx512M"
+  persistence:
+    enabled: false
+  resources: {}
+  ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
+  ## and remove the curly braces after 'resources:'
+  #  limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  #  requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+  configurationOverrides:
+    offsets.topic.replication.factor: "1"
+    "message.max.bytes": "134217728" # 128 MB
+    "replica.fetch.max.bytes": "134217728" # 128 MB
+    # "default.replication.factor": 3
+    # "min.insync.replicas": 2
+    "auto.create.topics.enable": false
+    "log.retention.ms": "10000" # 10s
+    #"log.retention.ms": "86400000" # 24h
+    "metrics.sample.window.ms": "5000" #5s
+  
+  # access kafka from outside
+  nodeport:
+    enabled: true
+
+## ------------------------------------------------------
+## Schema Registry
+## ------------------------------------------------------
+cp-schema-registry:
+  enabled: true
+  image: confluentinc/cp-schema-registry
+  imageTag: 5.4.0
+  ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+  ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  imagePullSecrets:
+  #  - name: "regcred"
+  heapOptions: "-Xms512M -Xmx512M"
+  resources: {}
+  ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
+  ## and remove the curly braces after 'resources:'
+  #  limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  #  requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+cp-kafka-rest:
+  enabled: false
+
+cp-kafka-connect:
+  enabled: false
+
+cp-ksql-server:
+  enabled: false
+
+cp-control-center:
+  enabled: false
diff --git a/execution/infrastructure/kubernetes/rbac/role.yaml b/execution/infrastructure/kubernetes/rbac/role.yaml
index 84ba14a8bc7a6eceb8a20596ede057ca2271b967..22b755226f1a76045dfae96821130ac59bf13bde 100644
--- a/execution/infrastructure/kubernetes/rbac/role.yaml
+++ b/execution/infrastructure/kubernetes/rbac/role.yaml
@@ -17,7 +17,6 @@ rules:
     resources:
     - services
     - pods
-    - servicemonitors
     - configmaps
     verbs:
     - delete
@@ -38,4 +37,27 @@ rules:
     verbs:
     - delete
     - list
-    - create
\ No newline at end of file
+    - create
+    - get
+  - apiGroups:
+    - theodolite.com
+    resources: 
+    - executions
+    - benchmarks
+    verbs:
+    - delete
+    - list
+    - get
+    - create
+    - watch
+    - update
+    - patch
+  - apiGroups:
+    - coordination.k8s.io
+    resources:
+    - leases
+    verbs:
+    - delete
+    - get
+    - create
+    - update
\ No newline at end of file
diff --git a/execution/infrastructure/random-scheduler/schedule.sh b/execution/infrastructure/random-scheduler/schedule.sh
index e2e10c0abbdd06da5f5075cd21851331ffb593fe..06745354d061225cfc1b3a746d361036b647051b 100755
--- a/execution/infrastructure/random-scheduler/schedule.sh
+++ b/execution/infrastructure/random-scheduler/schedule.sh
@@ -8,11 +8,18 @@ while true;
 do
     for PODNAME in $(kubectl get pods -n $TARGET_NAMESPACE -o json | jq '.items[] | select(.spec.schedulerName == "random-scheduler") | select(.spec.nodeName == null) | .metadata.name' | tr -d '"');
     do
-        NODES=($(kubectl get nodes -o json | jq '.items[].metadata.name' | tr -d '"'))
+        NODE_SELECTOR=$(kubectl get pod $PODNAME -n $TARGET_NAMESPACE -o json | jq -S 'if .spec.nodeSelector != null then .spec.nodeSelector else {} end')
+        NODES=($(kubectl get nodes -o json | jq --argjson nodeSelector "$NODE_SELECTOR" '.items[] | select(.metadata.labels | contains($nodeSelector)) | .metadata.name' | tr -d '"'))
         NUMNODES=${#NODES[@]}
+        if [ $NUMNODES -eq 0 ]; then
+            echo "No nodes found matching the node selector: $NODE_SELECTOR from pod $PODNAME"
+            echo "Pod $PODNAME cannot be scheduled."
+            continue;
+        fi
+        echo "Found $NUM_NODES suitable nodes for pod $PODNAME"
         CHOSEN=${NODES[$[$RANDOM % $NUMNODES]]}
         curl --header "Content-Type:application/json" --request POST --data '{"apiVersion":"v1", "kind": "Binding", "metadata": {"name": "'$PODNAME'"}, "target": {"apiVersion": "v1", "kind": "Node", "name": "'$CHOSEN'"}}' localhost:8080/api/v1/namespaces/$TARGET_NAMESPACE/pods/$PODNAME/binding/
         echo "Assigned $PODNAME to $CHOSEN"
     done
     sleep 1
-done
\ No newline at end of file
+done
diff --git a/execution/lag_analysis.py b/execution/lag_analysis.py
deleted file mode 100644
index 5b78ef3653753a2b95ac9b74bf8de156a71fb14c..0000000000000000000000000000000000000000
--- a/execution/lag_analysis.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import sys
-import os
-import requests
-from datetime import datetime, timedelta, timezone
-import pandas as pd
-import matplotlib.pyplot as plt
-import csv
-import logging
-
-
-def main(exp_id, benchmark, dim_value, instances, execution_minutes, prometheus_base_url, result_path):
-    print("Main")
-    time_diff_ms = int(os.getenv('CLOCK_DIFF_MS', 0))
-
-    now_local = datetime.utcnow().replace(tzinfo=timezone.utc).replace(microsecond=0)
-    now = now_local - timedelta(milliseconds=time_diff_ms)
-    print(f"Now Local: {now_local}")
-    print(f"Now Used: {now}")
-
-    end = now
-    start = now - timedelta(minutes=execution_minutes)
-
-    #print(start.isoformat().replace('+00:00', 'Z'))
-    #print(end.isoformat().replace('+00:00', 'Z'))
-
-    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
-        # 'query': "sum by(job,topic)(kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
-        'query': "sum by(group, topic)(kafka_consumergroup_group_lag > 0)",
-        'start': start.isoformat(),
-        'end': end.isoformat(),
-        'step': '5s'})
-    # response
-    # print(response.request.path_url)
-    # response.content
-    results = response.json()['data']['result']
-
-    d = []
-
-    for result in results:
-        # print(result['metric']['topic'])
-        topic = result['metric']['topic']
-        for value in result['values']:
-            # print(value)
-            d.append({'topic': topic, 'timestamp': int(
-                value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
-
-    df = pd.DataFrame(d)
-
-    # Do some analysis
-
-    input = df.loc[df['topic'] == "input"]
-
-    # input.plot(kind='line',x='timestamp',y='value',color='red')
-    # plt.show()
-
-    from sklearn.linear_model import LinearRegression
-
-    # values converts it into a numpy array
-    X = input.iloc[:, 1].values.reshape(-1, 1)
-    # -1 means that calculate the dimension of rows, but have 1 column
-    Y = input.iloc[:, 2].values.reshape(-1, 1)
-    linear_regressor = LinearRegression()  # create object for the class
-    linear_regressor.fit(X, Y)  # perform linear regression
-    Y_pred = linear_regressor.predict(X)  # make predictions
-
-    print(linear_regressor.coef_)
-
-    # print(Y_pred)
-
-    fields = [exp_id, datetime.now(), benchmark, dim_value,
-              instances, linear_regressor.coef_]
-    print(fields)
-    with open(f'{result_path}/results.csv', 'a') as f:
-        writer = csv.writer(f)
-        writer.writerow(fields)
-
-    filename = f"{result_path}/exp{exp_id}_{benchmark}_{dim_value}_{instances}"
-
-    plt.plot(X, Y)
-    plt.plot(X, Y_pred, color='red')
-
-    plt.savefig(f"{filename}_plot.png")
-
-    df.to_csv(f"{filename}_values.csv")
-
-    # Load total lag count
-
-    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
-        'query': "sum by(group)(kafka_consumergroup_group_lag > 0)",
-        'start': start.isoformat(),
-        'end': end.isoformat(),
-        'step': '5s'})
-
-    results = response.json()['data']['result']
-
-    d = []
-
-    for result in results:
-        # print(result['metric']['topic'])
-        group = result['metric']['group']
-        for value in result['values']:
-            # print(value)
-            d.append({'group': group, 'timestamp': int(
-                value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
-
-    df = pd.DataFrame(d)
-
-    df.to_csv(f"{filename}_totallag.csv")
-
-    # Load partition count
-
-    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
-        'query': "count by(group,topic)(kafka_consumergroup_group_offset > 0)",
-        'start': start.isoformat(),
-        'end': end.isoformat(),
-        'step': '5s'})
-
-    results = response.json()['data']['result']
-
-    d = []
-
-    for result in results:
-        # print(result['metric']['topic'])
-        topic = result['metric']['topic']
-        for value in result['values']:
-            # print(value)
-            d.append({'topic': topic, 'timestamp': int(
-                value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
-
-    df = pd.DataFrame(d)
-
-    df.to_csv(f"{filename}_partitions.csv")
-
-    # Load instances count
-
-    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
-        'query': "count(count (kafka_consumer_consumer_fetch_manager_metrics_records_lag) by(pod))",
-        'start': start.isoformat(),
-        'end': end.isoformat(),
-        'step': '5s'})
-
-    results = response.json()['data']['result']
-
-    d = []
-
-    for result in results:
-        for value in result['values']:
-            # print(value)
-            d.append({'timestamp': int(value[0]), 'value': int(value[1])})
-
-    df = pd.DataFrame(d)
-
-    df.to_csv(f"{filename}_instances.csv")
-
-
-if __name__ == '__main__':
-    logging.basicConfig(level=logging.INFO)
-
-    # Load arguments
-    exp_id = sys.argv[1]
-    benchmark = sys.argv[2]
-    dim_value = sys.argv[3]
-    instances = sys.argv[4]
-    execution_minutes = int(sys.argv[5])
-
-    main(exp_id, benchmark, dim_value, instances, execution_minutes,
-        'http://localhost:9090', 'results')
diff --git a/execution/lib/__init__.py b/execution/lib/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/execution/lib/cli_parser.py b/execution/lib/cli_parser.py
deleted file mode 100644
index de609bc55e21e9467a2b28168be6e478171cfddd..0000000000000000000000000000000000000000
--- a/execution/lib/cli_parser.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import argparse
-import os
-
-
-def env_list_default(env, tf):
-    """
-    Makes a list from an environment string.
-    """
-    v = os.environ.get(env)
-    if v is not None:
-        v = [tf(s) for s in v.split(',')]
-    return v
-
-
-def key_values_to_dict(kvs):
-    """
-    Given a list with key values in form `Key=Value` it creates a dict from it.
-    """
-    my_dict = {}
-    for kv in kvs:
-        k, v = kv.split("=")
-        my_dict[k] = v
-    return my_dict
-
-
-def env_dict_default(env):
-    """
-    Makes a dict from an environment string.
-    """
-    v = os.environ.get(env)
-    if v is not None:
-        return key_values_to_dict(v.split(','))
-    else:
-        return dict()
-
-
-class StoreDictKeyPair(argparse.Action):
-    def __init__(self, option_strings, dest, nargs=None, **kwargs):
-        self._nargs = nargs
-        super(StoreDictKeyPair, self).__init__(
-            option_strings, dest, nargs=nargs, **kwargs)
-
-    def __call__(self, parser, namespace, values, option_string=None):
-        my_dict = key_values_to_dict(values)
-        setattr(namespace, self.dest, my_dict)
-
-
-def default_parser(description):
-    """
-    Returns the default parser that can be used for thodolite and run uc py
-    :param description: The description the argument parser should show.
-    """
-    parser = argparse.ArgumentParser(description=description)
-    parser.add_argument('--uc',
-                        metavar='<uc>',
-                        default=os.environ.get('UC'),
-                        help='[mandatory] use case number, one of 1, 2, 3 or 4')
-    parser.add_argument('--partitions', '-p',
-                        metavar='<partitions>',
-                        type=int,
-                        default=os.environ.get('PARTITIONS', 40),
-                        help='Number of partitions for Kafka topics')
-    parser.add_argument('--cpu-limit', '-cpu',
-                        metavar='<CPU limit>',
-                        default=os.environ.get('CPU_LIMIT', '1000m'),
-                        help='Kubernetes CPU limit')
-    parser.add_argument('--memory-limit', '-mem',
-                        metavar='<memory limit>',
-                        default=os.environ.get('MEMORY_LIMIT', '4Gi'),
-                        help='Kubernetes memory limit')
-    parser.add_argument('--duration', '-d',
-                        metavar='<duration>',
-                        type=int,
-                        default=os.environ.get('DURATION', 5),
-                        help='Duration in minutes subexperiments should be \
-                                executed for')
-    parser.add_argument('--namespace',
-                        metavar='<NS>',
-                        default=os.environ.get('NAMESPACE', 'default'),
-                        help='Defines the Kubernetes where the applications should run')
-    parser.add_argument('--reset',
-                        action="store_true",
-                        default=os.environ.get(
-                            'RESET', 'false').lower() == 'true',
-                        help='Resets the environment before execution')
-    parser.add_argument('--reset-only',
-                        action="store_true",
-                        default=os.environ.get(
-                            'RESET_ONLY', 'false').lower() == 'true',
-                        help='Only resets the environment. Ignores all other parameters')
-    parser.add_argument('--prometheus',
-                        metavar='<URL>',
-                        default=os.environ.get(
-                            'PROMETHEUS_BASE_URL', 'http://localhost:9090'),
-                        help='Defines where to find the prometheus instance')
-    parser.add_argument('--path',
-                        metavar='<path>',
-                        default=os.environ.get('RESULT_PATH', 'results'),
-                        help='A directory path for the results')
-    parser.add_argument("--configurations",
-                        metavar="KEY=VAL",
-                        dest="configurations",
-                        action=StoreDictKeyPair,
-                        nargs="+",
-                        default=env_dict_default('CONFIGURATIONS'),
-                        help='Defines the environment variables for the UC')
-    return parser
-
-
-def benchmark_parser(description):
-    """
-    Parser for the overall benchmark execution
-    :param description: The description the argument parser should show.
-    """
-    parser = default_parser(description)
-
-    parser.add_argument('--loads',
-                        metavar='<load>',
-                        type=int,
-                        nargs='+',
-                        default=env_list_default('LOADS', int),
-                        help='[mandatory] Loads that should be executed')
-    parser.add_argument('--instances', '-i',
-                        dest='instances_list',
-                        metavar='<instances>',
-                        type=int,
-                        nargs='+',
-                        default=env_list_default('INSTANCES', int),
-                        help='[mandatory] List of instances used in benchmarks')
-    parser.add_argument('--domain-restriction',
-                        action="store_true",
-                        default=os.environ.get(
-                            'DOMAIN_RESTRICTION', 'false').lower() == 'true',
-                        help='To use domain restriction. For details see README')
-    parser.add_argument('--search-strategy',
-                        metavar='<strategy>',
-                        default=os.environ.get('SEARCH_STRATEGY', 'default'),
-                        help='The benchmarking search strategy. Can be set to default, linear-search or binary-search')
-    parser.add_argument('--threshold',
-                        type=int,
-                        metavar='<threshold>',
-                        default=os.environ.get('THRESHOLD', 2000),
-                        help='The threshold for the trend slop that the search strategies use to determine that a load could be handled')
-    return parser
-
-
-def execution_parser(description):
-    """
-    Parser for executing one use case
-    :param description: The description the argument parser should show.
-    """
-    parser = default_parser(description)
-    parser.add_argument('--exp-id',
-                        metavar='<exp id>',
-                        default=os.environ.get('EXP_ID'),
-                        help='[mandatory] ID of the experiment')
-    parser.add_argument('--load',
-                        metavar='<load>',
-                        type=int,
-                        default=os.environ.get('LOAD'),
-                        help='[mandatory] Load that should be used for benchmakr')
-    parser.add_argument('--instances',
-                        metavar='<instances>',
-                        type=int,
-                        default=os.environ.get('INSTANCES'),
-                        help='[mandatory] Numbers of instances to be benchmarked')
-    return parser
diff --git a/execution/requirements.txt b/execution/requirements.txt
deleted file mode 100644
index 18a06882007eebf69bf3bf4f84b869454b36a0a6..0000000000000000000000000000000000000000
--- a/execution/requirements.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-matplotlib==3.2.0
-pandas==1.0.1
-requests==2.23.0
-scikit-learn==0.22.2.post1
-
-# For run_uc.py
-kubernetes==11.0.0
-confuse==1.1.0
diff --git a/execution/run_uc.py b/execution/run_uc.py
deleted file mode 100644
index 904b87b377ca2db3f2d4ddd4fb70aba0136cfa21..0000000000000000000000000000000000000000
--- a/execution/run_uc.py
+++ /dev/null
@@ -1,609 +0,0 @@
-import argparse  # parse arguments from cli
-import atexit  # used to clear resources at exit of program (e.g. ctrl-c)
-from kubernetes import client, config  # kubernetes api
-from kubernetes.stream import stream
-import lag_analysis
-import logging  # logging
-from os import path, environ  # path utilities
-from lib.cli_parser import execution_parser
-import subprocess  # execute bash commands
-import sys  # for exit of program
-import time  # process sleep
-import yaml  # convert from file to yaml object
-
-coreApi = None  # acces kubernetes core api
-appsApi = None  # acces kubernetes apps api
-customApi = None  # acces kubernetes custom object api
-
-namespace = None
-
-
-def load_variables():
-    """Load the CLI variables given at the command line"""
-    print('Load CLI variables')
-    parser = execution_parser(description='Run use case Programm')
-    args = parser.parse_args()
-    print(args)
-    if (args.exp_id is None or args.uc is None or args.load is None or args.instances is None) and not args.reset_only:
-        print('The options --exp-id, --uc, --load and --instances are mandatory.')
-        print('Some might not be set!')
-        sys.exit(1)
-    return args
-
-
-def initialize_kubernetes_api():
-    """Load the kubernetes config from local or the cluster and creates
-    needed APIs.
-    """
-    global coreApi, appsApi, customApi
-    print('Connect to kubernetes api')
-    try:
-        config.load_kube_config()  # try using local config
-    except config.config_exception.ConfigException as e:
-        # load config from pod, if local config is not available
-        logging.debug(
-            'Failed loading local Kubernetes configuration try from cluster')
-        logging.debug(e)
-        config.load_incluster_config()
-
-    coreApi = client.CoreV1Api()
-    appsApi = client.AppsV1Api()
-    customApi = client.CustomObjectsApi()
-
-
-def create_topics(topics):
-    """Create the topics needed for the use cases
-    :param topics: List of topics that should be created.
-    """
-    # Calling exec and waiting for response
-    print('Create topics')
-    for (topic, partitions) in topics:
-        print(f'Create topic {topic} with #{partitions} partitions')
-        exec_command = [
-            '/bin/sh',
-            '-c',
-            f'kafka-topics --zookeeper my-confluent-cp-zookeeper:2181\
-            --create --topic {topic} --partitions {partitions}\
-            --replication-factor 1'
-        ]
-        resp = stream(coreApi.connect_get_namespaced_pod_exec,
-                      "kafka-client",
-                      namespace,
-                      command=exec_command,
-                      stderr=True, stdin=False,
-                      stdout=True, tty=False)
-        print(resp)
-
-
-def load_yaml(file_path):
-    """Creates a yaml file from the file at given path.
-    :param file_path: The path to the file which contains the yaml.
-    :return: The file as a yaml object.
-    """
-    try:
-        f = open(path.join(path.dirname(__file__), file_path))
-        with f:
-            return yaml.safe_load(f)
-    except Exception as e:
-        logging.error('Error opening file %s', file_path)
-        logging.error(e)
-
-
-def load_yaml_files():
-    """Load the needed yaml files and creates objects from them.
-    :return: wg, app_svc, app_svc_monitor ,app_jmx, app_deploy
-    """
-    print('Load kubernetes yaml files')
-    wg_svc = load_yaml('uc-workload-generator/load-generator-service.yaml')
-    wg = load_yaml('uc-workload-generator/workloadGenerator.yaml')
-    app_svc = load_yaml('uc-application/aggregation-service.yaml')
-    app_svc_monitor = load_yaml('uc-application/service-monitor.yaml')
-    app_jmx = load_yaml('uc-application/jmx-configmap.yaml')
-    app_deploy = load_yaml('uc-application/aggregation-deployment.yaml')
-
-    print('Kubernetes yaml files loaded')
-    return wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy
-
-
-def replace_env_value(container, key, value):
-    """
-    Helper method to replace the value of a given environment variable
-    in a container's Kubernetes env list.
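-
-    Example: replace_env_value(container['env'], 'NUM_SENSORS', '100')
-    turns the entry {'name': 'NUM_SENSORS', 'value': ...} into value '100'.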
-    """
-    next(filter(lambda x: x['name'] == key, container))[
-        'value'] = value
-
-
-def start_workload_generator(svc_yaml, wg_yaml, dim_value, uc_id):
-    """Starts the workload generator.
-    :param svc_yaml: The yaml object for the workload generator service.
-    :param wg_yaml: The yaml object for the workload generator.
-    :param string dim_value: The dimension value the load generator should use.
-    :param string uc_id: Use case id for which load should be generated.
-    :return:
-        The Service and Deployment created by the API or, in case they
-        already exist or an error occurs, the yaml objects.
-    """
-    print('Start workload generator')
-    svc, wg_deploy = None, None
-
-    # Create Service
-    try:
-        svc = coreApi.create_namespaced_service(
-            namespace=namespace, body=svc_yaml)
-        print(f'Service {svc.metadata.name} created.')
-    except client.rest.ApiException as e:
-        svc = svc_yaml
-        logging.error("Service creation error: %s", e.reason)
-
-    # Create Deployment
-    num_sensors = dim_value
-    wl_max_records = 150000
-    wl_instances = (num_sensors + wl_max_records - 1) // wl_max_records
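-    # Ceiling division, e.g. 300,000 sensors with 150,000 records per instance
-    # yields (300000 + 149999) // 150000 = 2 generator instances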
-
-    # Set UC4-specific parameters
-    if uc_id == '4':
-        print('Using UC4-specific configuration')
-        num_nested_groups = dim_value
-        num_sensors = 4
-        approx_num_sensors = num_sensors ** num_nested_groups
-        wl_instances = (approx_num_sensors +
-                        wl_max_records - 1) // wl_max_records
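-        # e.g. dim_value = 5 nested groups -> 4 ** 5 = 1024 approximated
-        # sensors, for which a single generator instance suffices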
-
-    # Customize workload generator creations
-    wg_yaml['spec']['replicas'] = wl_instances
-    # Set used use case
-    wg_container = next(filter(
-        lambda x: x['name'] == 'workload-generator',
-        wg_yaml['spec']['template']['spec']['containers']))
-    wg_container['image'] = 'ghcr.io/cau-se/theodolite-uc' + uc_id + \
-        '-workload-generator:latest'
-    # Set environment variables
-
-    replace_env_value(wg_container['env'], 'NUM_SENSORS', str(num_sensors))
-
-    if uc_id == '4':  # Special configuration for UC4
-        replace_env_value(
-            wg_container['env'], 'NUM_NESTED_GROUPS', str(num_nested_groups))
-
-    try:
-        wg_deploy = appsApi.create_namespaced_deployment(
-            namespace=namespace,
-            body=wg_yaml
-        )
-        print(f'Deployment {wg_deploy.metadata.name} created.')
-    except client.rest.ApiException as e:
-        print(f'Deployment creation error: {e.reason}')
-        wg_deploy = wg_yaml
-
-    return svc, wg_deploy
-
-
-def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml,
-                      instances, uc_id, memory_limit, cpu_limit,
-                      configurations):
-    """Applies the service, service monitor, jmx config map and start the
-    use case application.
-
-    :param svc_yaml: The yaml object for the service.
-    :param svc_monitor_yaml: The yaml object for the service monitor.
-    :param jmx_yaml: The yaml object for the jmx config map.
-    :param deploy_yaml: The yaml object for the application.
-    :param int instances: Number of instances for use case application.
-    :param string uc_id: The id of the use case to execute.
-    :param string memory_limit: The memory limit for the application.
-    :param string cpu_limit: The CPU limit for the application.
-    :param dict configurations: A dictionary with ENV variables for configurations.
-    :return:
-        The Service, ServiceMonitor, JMX ConfigMap and Deployment.
-        In case a resource already exists or an error occurs, the
-        corresponding yaml object is returned.
-    """
-    print('Start use case application')
-    svc, svc_monitor, jmx_cm, app_deploy = None, None, None, None
-
-    # Create Service
-    try:
-        svc = coreApi.create_namespaced_service(
-            namespace=namespace, body=svc_yaml)
-        print(f'Service {svc.metadata.name} created.')
-    except client.rest.ApiException as e:
-        svc = svc_yaml
-        logging.error("Service creation error: %s", e.reason)
-
-    # Create custom object service monitor
-    try:
-        svc_monitor = customApi.create_namespaced_custom_object(
-            group="monitoring.coreos.com",
-            version="v1",
-            namespace=namespace,
-            plural="servicemonitors",  # CustomResourceDef of ServiceMonitor
-            body=svc_monitor_yaml,
-        )
-        print(f"ServiceMonitor '{svc_monitor['metadata']['name']}' created.")
-    except client.rest.ApiException as e:
-        svc_monitor = svc_monitor_yaml
-        logging.error("ServiceMonitor creation error: %s", e.reason)
-
-    # Apply jmx config map for aggregation service
-    try:
-        jmx_cm = coreApi.create_namespaced_config_map(
-            namespace=namespace, body=jmx_yaml)
-        print(f"ConfigMap '{jmx_cm.metadata.name}' created.")
-    except client.rest.ApiException as e:
-        jmx_cm = jmx_yaml
-        logging.error("ConfigMap creation error: %s", e.reason)
-
-    # Create deployment
-    deploy_yaml['spec']['replicas'] = instances
-    app_container = next(filter(
-        lambda x: x['name'] == 'uc-application',
-        deploy_yaml['spec']['template']['spec']['containers']))
-    app_container['image'] = 'ghcr.io/cau-se/theodolite-uc' + uc_id \
-        + '-kstreams-app:latest'
-
-    # Set configurations environment parameters for SPE
-    for k, v in configurations.items():
-        # check if environment variable is already defined in yaml
-        env = next(filter(lambda x: x['name'] == k,
-                          app_container['env']), None)
-        if env is not None:
-            env['value'] = v  # replace value
-        else:
-            # create new environment pair
-            conf = {'name': k, 'value': v}
-            app_container['env'].append(conf)
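-    # e.g. configurations={'COMMIT_INTERVAL_MS': '100'} (illustrative name)
-    # either overwrites an existing env entry of that name or appends a new one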
-
-    # Set resources in Kubernetes
-    app_container['resources']['limits']['memory'] = memory_limit
-    app_container['resources']['limits']['cpu'] = cpu_limit
-
-    # Deploy application
-    try:
-        app_deploy = appsApi.create_namespaced_deployment(
-            namespace=namespace,
-            body=deploy_yaml
-        )
-        print(f"Deployment '{app_deploy.metadata.name}' created.")
-    except client.rest.ApiException as e:
-        app_deploy = deploy_yaml
-        logging.error("Deployment creation error: %s", e.reason)
-
-    return svc, svc_monitor, jmx_cm, app_deploy
-
-
-def wait_execution(execution_minutes):
-    """
-    Wait while the benchmark is executing.
-    :param int execution_minutes: The duration to wait, in minutes.
-    """
-    print('Wait while executing')
-
-    for i in range(execution_minutes):
-        time.sleep(60)
-        print(f'Executed: {i+1} minutes')
-    print('Execution finished')
-    return
-
-
-def run_evaluation(exp_id, uc_id, dim_value, instances, execution_minutes, prometheus_base_url, result_path):
-    """
-    Runs the evaluation function
-    :param string exp_id: ID of the experiment.
-    :param string uc_id: ID of the executed use case.
-    :param int dim_value: The dimension value used for execution.
-    :param int instances: The number of instances used for the execution.
-    :param int execution_minutes: How long the use case was executed, in minutes.
-    :param string prometheus_base_url: Base URL of the Prometheus instance.
-    :param string result_path: Path where the results are stored.
-    """
-    print('Run evaluation function')
-    try:
-        lag_analysis.main(exp_id, f'uc{uc_id}', dim_value, instances,
-                          execution_minutes, prometheus_base_url,
-                          result_path)
-    except Exception as e:
-        err_msg = 'Evaluation function failed'
-        print(err_msg)
-        logging.exception(err_msg)
-        print('Benchmark execution continues')
-
-    return
-
-
-def delete_resource(obj, del_func):
-    """
-    Helper function to delete Kubernetes resources.
-    First tries to delete with the Kubernetes API object.
-    Then it uses the dict representation of the yaml to delete the object.
-    :param obj: Either a Kubernetes resource object or the yaml as a dict.
-    :param del_func: The function that needs to be executed for deletion.
-    """
-    try:
-        del_func(obj.metadata.name, namespace)
-    except Exception as e:
-        logging.debug(
-            'Error deleting resource with api object, trying with dict.')
-        try:
-            del_func(obj['metadata']['name'], namespace)
-        except Exception as e:
-            logging.error("Error deleting resource")
-            logging.error(e)
-            return
-    print('Resource deleted')
-
-
-def stop_applications(wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy):
-    """Stops the applied applications and delete resources.
-    :param wg: The load generator service.
-    :param wg: The load generator deployment.
-    :param app_svc: The application service.
-    :param app_svc_monitor: The application service monitor.
-    :param app_jmx: The application jmx config map.
-    :param app_deploy: The application deployment.
-    """
-    print('Stop use case application and load generator')
-
-    print('Delete load generator deployment')
-    delete_resource(wg, appsApi.delete_namespaced_deployment)
-
-    print('Delete load generator service')
-    delete_resource(wg_svc, coreApi.delete_namespaced_service)
-
-    print('Delete app service')
-    delete_resource(app_svc, coreApi.delete_namespaced_service)
-
-    print('Delete service monitor')
-    try:
-        customApi.delete_namespaced_custom_object(
-            group="monitoring.coreos.com",
-            version="v1",
-            namespace=namespace,
-            plural="servicemonitors",
-            name=app_svc_monitor['metadata']['name'])
-        print('Resource deleted')
-    except Exception as e:
-        print('Error deleting service monitor')
-
-    print('Delete jmx config map')
-    delete_resource(app_jmx, coreApi.delete_namespaced_config_map)
-
-    print('Delete uc application')
-    delete_resource(app_deploy, appsApi.delete_namespaced_deployment)
-
-    print('Check all pods deleted.')
-    while True:
-        # Wait a bit for deletion
-        time.sleep(2)
-
-        # Count how many pods still need to be deleted
-        no_load = len(coreApi.list_namespaced_pod(
-            namespace, label_selector='app=titan-ccp-load-generator').items)
-        no_uc = len(coreApi.list_namespaced_pod(
-            namespace, label_selector='app=titan-ccp-aggregation').items)
-
-        # Check if all pods deleted
-        if no_load <= 0 and no_uc <= 0:
-            print('All pods deleted.')
-            break
-
-        print(f'#{no_load} load generator and #{no_uc} uc pods need to be deleted')
-    return
-
-
-def delete_topics(topics):
-    """Delete topics from Kafka.
-    :param topics: List of topics to delete.
-    """
-    print('Delete topics from Kafka')
-
-    topics_delete = 'theodolite-.*|' + '|'.join([ti[0] for ti in topics])
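-    # e.g. for the topics created in main() this yields the pattern
-    # 'theodolite-.*|input|output|aggregation-feedback|configuration'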
-
-    num_topics_command = [
-        '/bin/sh',
-        '-c',
-        f'kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list '
-        f'| sed -n -E "/^({topics_delete})( - marked for deletion)?$/p" '
-        f'| wc -l'
-    ]
-
-    topics_deletion_command = [
-        '/bin/sh',
-        '-c',
-        f'kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete \
-        --topic "{topics_delete}"'
-    ]
-
-    # Wait until the topics are deleted
-    while True:
-        # topic deletion, sometimes a second deletion seems to be required
-        resp = stream(coreApi.connect_get_namespaced_pod_exec,
-                      "kafka-client",
-                      namespace,
-                      command=topics_deletion_command,
-                      stderr=True, stdin=False,
-                      stdout=True, tty=False)
-        print(resp)
-
-        print('Wait for topic deletion')
-        time.sleep(2)
-        resp = stream(coreApi.connect_get_namespaced_pod_exec,
-                      "kafka-client",
-                      namespace,
-                      command=num_topics_command,
-                      stderr=True, stdin=False,
-                      stdout=True, tty=False)
-        if resp == '0':
-            print('Topics deleted')
-            break
-    return
-
-
-def reset_zookeeper():
-    """Delete ZooKeeper configurations used for workload generation.
-    """
-    print('Delete ZooKeeper configurations used for workload generation')
-
-    delete_zoo_data_command = [
-        '/bin/sh',
-        '-c',
-        'zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall '
-        + '/workload-generation'
-    ]
-
-    check_zoo_data_command = [
-        '/bin/sh',
-        '-c',
-        'zookeeper-shell my-confluent-cp-zookeeper:2181 get '
-        + '/workload-generation'
-    ]
-
-    # Wait for configuration deletion
-    while True:
-        # Delete Zookeeper configuration data
-        resp = stream(coreApi.connect_get_namespaced_pod_exec,
-                      "zookeeper-client",
-                      namespace,
-                      command=delete_zoo_data_command,
-                      stderr=True, stdin=False,
-                      stdout=True, tty=False)
-        logging.debug(resp)
-
-        # Check data is deleted
-        client = stream(coreApi.connect_get_namespaced_pod_exec,
-                        "zookeeper-client",
-                        namespace,
-                        command=check_zoo_data_command,
-                        stderr=True, stdin=False,
-                        stdout=True, tty=False,
-                        _preload_content=False)  # Get client for returncode
-        client.run_forever(timeout=60)  # Start the client
-
-        if client.returncode == 1:  # Means data not available anymore
-            print('ZooKeeper reset was successful.')
-            break
-        else:
-            print('ZooKeeper reset was not successful. Retrying in 5s.')
-            time.sleep(5)
-    return
-
-
-def stop_lag_exporter():
-    """
-    Stop the lag exporter in order to reset it and allow a smooth execution
-    of the next use cases.
-    """
-    print('Stop the lag exporter')
-
-    try:
-        # Get lag exporter
-        pod_list = coreApi.list_namespaced_pod(
-            namespace=namespace, label_selector='app.kubernetes.io/name=kafka-lag-exporter')
-        lag_exporter_pod = pod_list.items[0].metadata.name
-
-        # Delete lag exporter pod
-        res = coreApi.delete_namespaced_pod(
-            name=lag_exporter_pod, namespace=namespace)
-        print('Deleted lag exporter pod: ' + lag_exporter_pod)
-    except client.rest.ApiException as e:
-        logging.error('Exception while stopping lag exporter')
-        logging.error(e)
-
-    return
-
-
-def reset_cluster(wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics):
-    """
-    Stop the applications, delete topics, reset zookeeper and stop lag exporter.
-    """
-    print('Reset cluster')
-    stop_applications(wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy)
-    print('---------------------')
-    delete_topics(topics)
-    print('---------------------')
-    reset_zookeeper()
-    print('---------------------')
-    stop_lag_exporter()
-
-
-def main(exp_id, uc_id, dim_value, instances, partitions, cpu_limit, memory_limit, execution_minutes, prometheus_base_url, reset, ns, result_path, configurations, reset_only=False):
-    """
-    Main method to execute the benchmark once for a given use case.
-    Start workload generator/application -> execute -> analyse -> stop all
-    :param string exp_id: The id of the executed experiment.
-    :param string uc_id: Use case to execute.
-    :param int dim_value: Dimension value for the load generator.
-    :param int instances: Number of instances for the application.
-    :param int partitions: Number of partitions the Kafka topics should have.
-    :param string cpu_limit: Max CPU utilization for the application.
-    :param string memory_limit: Max memory utilization for the application.
-    :param int execution_minutes: How long to execute the benchmark, in minutes.
-    :param string prometheus_base_url: Base URL of the Prometheus instance.
-    :param boolean reset: Flag for resetting the cluster before execution.
-    :param string ns: The Kubernetes namespace to use.
-    :param string result_path: Path where the results are stored.
-    :param dict configurations: Key-value pairs for setting env variables of the UC.
-    :param boolean reset_only: Flag to only reset the application.
-    global namespace
-    namespace = ns
-    wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy = load_yaml_files()
-    print('---------------------')
-
-    initialize_kubernetes_api()
-    print('---------------------')
-
-    topics = [('input', partitions),
-              ('output', partitions),
-              ('aggregation-feedback', partitions),
-              ('configuration', 1)]
-
-    # Check for reset options
-    if reset_only:
-        # Only reset the cluster and then end the program
-        print('Reset only mode')
-        reset_cluster(wg_svc, wg, app_svc, app_svc_monitor,
-                      app_jmx, app_deploy, topics)
-        sys.exit()
-    if reset:
-        # Reset cluster before execution
-        reset_cluster(wg_svc, wg, app_svc, app_svc_monitor,
-                      app_jmx, app_deploy, topics)
-        print('---------------------')
-
-    # Register the reset operation so that it is executed if the program aborts
-    atexit.register(reset_cluster, wg_svc, wg, app_svc,
-                    app_svc_monitor, app_jmx, app_deploy, topics)
-
-    create_topics(topics)
-    print('---------------------')
-
-    wg_svc, wg = start_workload_generator(wg_svc, wg, dim_value, uc_id)
-    print('---------------------')
-
-    app_svc, app_svc_monitor, app_jmx, app_deploy = start_application(
-        app_svc,
-        app_svc_monitor,
-        app_jmx,
-        app_deploy,
-        instances,
-        uc_id,
-        memory_limit,
-        cpu_limit,
-        configurations)
-    print('---------------------')
-
-    wait_execution(execution_minutes)
-    print('---------------------')
-
-    run_evaluation(exp_id, uc_id, dim_value, instances,
-                   execution_minutes, prometheus_base_url, result_path)
-    print('---------------------')
-
-    # Cluster was reset regularly, so the atexit handler is not needed anymore
-    reset_cluster(wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics)
-    atexit.unregister(reset_cluster)
-
-
-if __name__ == '__main__':
-    logging.basicConfig(level=logging.INFO)
-    args = load_variables()
-    print('---------------------')
-    main(args.exp_id, args.uc, args.load, args.instances, args.partitions,
-         args.cpu_limit, args.memory_limit, args.duration, args.prometheus,
-         args.reset, args.namespace, args.path, args.configurations,
-         args.reset_only)
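-
-# Illustrative invocation (the exact flags are defined in lib/cli_parser.py):
-#   python run_uc.py --exp-id 1 --uc 1 --load 50000 --instances 2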
diff --git a/execution/strategies/__init__.py b/execution/strategies/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/execution/strategies/config.py b/execution/strategies/config.py
deleted file mode 100644
index d4df97c18ae54c7c181ddf08264c013f9447350f..0000000000000000000000000000000000000000
--- a/execution/strategies/config.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from dataclasses import dataclass
-
-@dataclass
-class ExperimentConfig:
-    """ Wrapper for the configuration of an experiment. """
-    use_case: str
-    exp_id: int
-    dim_values: list
-    replicass: list
-    partitions: int
-    cpu_limit: str
-    memory_limit: str
-    execution_minutes: int
-    prometheus_base_url: str
-    reset: bool
-    namespace: str
-    result_path: str
-    configurations: dict
-    domain_restriction_strategy: object
-    search_strategy: object
-    threshold: int
-    subexperiment_executor: object
-    subexperiment_evaluator: object
diff --git a/execution/strategies/experiment_execution.py b/execution/strategies/experiment_execution.py
deleted file mode 100644
index c2ee18f9b79a6e880dbcb69b47061cc5ecc6b9ba..0000000000000000000000000000000000000000
--- a/execution/strategies/experiment_execution.py
+++ /dev/null
@@ -1,6 +0,0 @@
-class ExperimentExecutor:
-    def __init__(self, config):
-        self.config = config
-
-    def execute(self):
-        self.config.domain_restriction_strategy.execute(self.config)
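-
-# The executor delegates to the configured domain restriction strategy, which
-# in turn invokes the configured search strategy for every dimension value.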
diff --git a/execution/strategies/strategies/config.py b/execution/strategies/strategies/config.py
deleted file mode 100644
index 5c31f8c97a4085931cdfa1fa017d4e5909e21915..0000000000000000000000000000000000000000
--- a/execution/strategies/strategies/config.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from dataclasses import dataclass
-
-@dataclass
-class SubexperimentConfig:
-    """ Wrapper for the configuration of a subexperiment """
-    use_case: str
-    exp_id: int
-    counter: int
-    dim_value: int
-    replicas: int
-    partitions: int
-    cpu_limit: str
-    memory_limit: str
-    execution_minutes: int
-    prometheus_base_url: str
-    reset: bool
-    namespace: str
-    result_path: str
-    configurations: dict
diff --git a/execution/strategies/strategies/domain_restriction/lower_bound_strategy.py b/execution/strategies/strategies/domain_restriction/lower_bound_strategy.py
deleted file mode 100644
index b218731fc76d83347b4dbf10448f01615d378c0b..0000000000000000000000000000000000000000
--- a/execution/strategies/strategies/domain_restriction/lower_bound_strategy.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# The lower bound strategy
-def execute(config):
-    dim_value_index = 0
-    lower_bound_replicas_index = 0
-    subexperiment_counter = 0
-    while dim_value_index < len(config.dim_values) and lower_bound_replicas_index >= 0 and lower_bound_replicas_index < len(config.replicass):
-        lower_bound_replicas_index, subexperiment_counter = config.search_strategy.execute(
-            config=config,
-            dim_value_index=dim_value_index,
-            lower_replicas_bound_index=lower_bound_replicas_index,
-            subexperiment_counter=subexperiment_counter)
-        dim_value_index+=1
\ No newline at end of file
diff --git a/execution/strategies/strategies/domain_restriction/no_lower_bound_strategy.py b/execution/strategies/strategies/domain_restriction/no_lower_bound_strategy.py
deleted file mode 100644
index e5dea56118460b0dfdc6b1c36ce2587b6752512b..0000000000000000000000000000000000000000
--- a/execution/strategies/strategies/domain_restriction/no_lower_bound_strategy.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# The strategy where the domain contains all numbers of instances
-def execute(config):
-    dim_value_index = 0
-    subexperiment_counter = 0
-    while dim_value_index < len(config.dim_values):
-        _, subexperiment_counter = config.search_strategy.execute(
-            config=config,
-            dim_value_index=dim_value_index,
-            lower_replicas_bound_index=0,
-            subexperiment_counter=subexperiment_counter)
-        dim_value_index+=1
\ No newline at end of file
diff --git a/execution/strategies/strategies/search/binary_search_strategy.py b/execution/strategies/strategies/search/binary_search_strategy.py
deleted file mode 100644
index 46748cbda250597b3a7644522126268be4599293..0000000000000000000000000000000000000000
--- a/execution/strategies/strategies/search/binary_search_strategy.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# The binary search strategy
-import os
-from strategies.strategies.config import SubexperimentConfig
-
-def binary_search(config, dim_value, lower, upper, subexperiment_counter):
-    if lower == upper:
-        print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[lower]}")
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[lower], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
-        config.subexperiment_executor.execute(subexperiment_config)
-        success = config.subexperiment_evaluator.execute(subexperiment_config,
-                                                         config.threshold)
-        if success: # successful, the upper neighbor is assumed to also have been successful
-            return (lower, subexperiment_counter+1)
-        else: # not successful
-            return (lower+1, subexperiment_counter)
-    elif lower+1==upper:
-        print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[lower]}")
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[lower], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
-        config.subexperiment_executor.execute(subexperiment_config)
-        success = config.subexperiment_evaluator.execute(subexperiment_config,
-                                                         config.threshold)
-        if success: # minimal instances found
-            return (lower, subexperiment_counter)
-        else: # not successful, check if lower+1 instances are sufficient
-            print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[upper]}")
-            subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[upper], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
-            config.subexperiment_executor.execute(subexperiment_config)
-            success = config.subexperiment_evaluator.execute(subexperiment_config,
-                                                             config.threshold)
-            if success: # minimal instances found
-                return (upper, subexperiment_counter)
-            else:
-                return (upper+1, subexperiment_counter)
-    else:
-        # test mid
-        mid=(upper+lower)//2
-        print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[mid]}")
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[mid], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
-        config.subexperiment_executor.execute(subexperiment_config)
-        success = config.subexperiment_evaluator.execute(subexperiment_config,
-                                                         config.threshold)
-        if success: # success -> search in (lower, mid-1)
-            return binary_search(config, dim_value, lower, mid-1, subexperiment_counter+1)
-        else: # not success -> search in (mid+1, upper)
-            return binary_search(config, dim_value, mid+1, upper, subexperiment_counter+1)
-
-def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_counter):
-    upper = len(config.replicass)-1
-    dim_value=config.dim_values[dim_value_index]
-    return binary_search(config, dim_value, lower_replicas_bound_index, upper, subexperiment_counter)
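-
-# Illustrative trace for replica indices 0..6 where only indices >= 1 succeed:
-# mid=3 (success) -> search [0,2]; mid=1 (success) -> search [0,0];
-# lower == upper == 0 (failure) -> index 1 is returned as the minimum.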
diff --git a/execution/strategies/strategies/search/check_all_strategy.py b/execution/strategies/strategies/search/check_all_strategy.py
deleted file mode 100644
index 0861945113b829fa79317d8a1a6312b4d6e4f71d..0000000000000000000000000000000000000000
--- a/execution/strategies/strategies/search/check_all_strategy.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# The check_all strategy
-import os
-from strategies.strategies.config import SubexperimentConfig
-
-
-def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_counter):
-    new_lower_replicas_bound_index = lower_replicas_bound_index
-    new_lower_replicas_bound_found = False
-    subexperiments_total = len(config.dim_values) * len(config.replicass)
-    while lower_replicas_bound_index < len(config.replicass):
-        subexperiment_counter += 1
-        dim_value = config.dim_values[dim_value_index]
-        replicas = config.replicass[lower_replicas_bound_index]
-        print(
-            f"Run subexperiment {subexperiment_counter} of {subexperiments_total} with dimension value {dim_value} and {replicas} replicas.")
-
-        subexperiment_config = SubexperimentConfig(
-            config.use_case, config.exp_id, subexperiment_counter, dim_value,
-            replicas, config.partitions, config.cpu_limit, config.memory_limit,
-            config.execution_minutes, config.prometheus_base_url, config.reset,
-            config.namespace, config.result_path, config.configurations)
-
-        config.subexperiment_executor.execute(subexperiment_config)
-
-        success = config.subexperiment_evaluator.execute(subexperiment_config,
-                                                         config.threshold)
-        if success and not new_lower_replicas_bound_found:
-            new_lower_replicas_bound_found = True
-            new_lower_replicas_bound_index = lower_replicas_bound_index
-        lower_replicas_bound_index += 1
-    return (new_lower_replicas_bound_index, subexperiment_counter)
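-
-# Unlike the linear search, check_all executes all remaining replica counts for
-# a dimension value and records the first successful one as the new lower bound.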
diff --git a/execution/strategies/strategies/search/linear_search_strategy.py b/execution/strategies/strategies/search/linear_search_strategy.py
deleted file mode 100644
index 8e777303742e54cf2a11a1bde60e95b8aa85489d..0000000000000000000000000000000000000000
--- a/execution/strategies/strategies/search/linear_search_strategy.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# The linear-search strategy
-
-import os
-from strategies.strategies.config import SubexperimentConfig
-
-def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_counter):
-    subexperiments_total=len(config.dim_values)+len(config.replicass)-1
-    dim_value=config.dim_values[dim_value_index]
-    while lower_replicas_bound_index < len(config.replicass):
-        subexperiment_counter+=1
-        replicas=config.replicass[lower_replicas_bound_index]
-        print(f"Run subexperiment {subexperiment_counter} from at most {subexperiments_total} with dimension value {dim_value} and {replicas} replicas.")
-
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, replicas, config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
-
-        config.subexperiment_executor.execute(subexperiment_config)
-        success = config.subexperiment_evaluator.execute(subexperiment_config,
-                                                         config.threshold)
-        if success:
-            return (lower_replicas_bound_index, subexperiment_counter)
-        else:
-            lower_replicas_bound_index+=1
-    return (lower_replicas_bound_index, subexperiment_counter)
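-
-# Combined with the lower bound restriction, the search for each dimension
-# value resumes at the minimal replica count found for the previous one, so a
-# 7x7 grid needs at most len(dim_values) + len(replicass) - 1 = 13 subexperiments.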
diff --git a/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py b/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
deleted file mode 100644
index 30188de837746b76113ec635ca77fadc3a91cb92..0000000000000000000000000000000000000000
--- a/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import lib.trend_slope_computer as trend_slope_computer
-import logging
-import os
-
-WARMUP_SEC = 60
-
-def execute(config, threshold):
-    """
-    Check whether the trend slope of the total lag of the subexperiment
-    falls below the threshold.
-
-    :param config: Configuration of the subexperiment.
-    :param threshold: The threshold the trend slope needs to fall below.
-    """
-    cwd = f'{os.getcwd()}/{config.result_path}'
-    file = f"exp{config.exp_id}_uc{config.use_case}_{config.dim_value}_{config.replicas}_totallag.csv"
-
-    try:
-        trend_slope = trend_slope_computer.compute(cwd, file, WARMUP_SEC)
-    except Exception as e:
-        err_msg = 'Computing trend slope failed'
-        print(err_msg)
-        logging.exception(err_msg)
-        print('Mark this subexperiment as not successful and continue benchmark')
-        return False
-
-    print(f"Trend Slope: {trend_slope}")
-
-    return trend_slope < threshold
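-
-# The expected input file follows the naming scheme above, e.g.
-# 'exp0_uc1_50000_2_totallag.csv' for exp_id=0, use_case='1', dim_value=50000
-# and replicas=2.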
diff --git a/execution/strategies/subexperiment_execution/subexperiment_executor.py b/execution/strategies/subexperiment_execution/subexperiment_executor.py
deleted file mode 100644
index 6931dacfc72081cbe112c4d6d1003703ba42c526..0000000000000000000000000000000000000000
--- a/execution/strategies/subexperiment_execution/subexperiment_executor.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Wrapper that makes the execution method of a subexperiment interchangeable.
-
-import os
-import run_uc
-
-def execute(subexperiment_config):
-    run_uc.main(
-        exp_id=subexperiment_config.exp_id,
-        uc_id=subexperiment_config.use_case,
-        dim_value=int(subexperiment_config.dim_value),
-        instances=int(subexperiment_config.replicas),
-        partitions=subexperiment_config.partitions,
-        cpu_limit=subexperiment_config.cpu_limit,
-        memory_limit=subexperiment_config.memory_limit,
-        execution_minutes=int(subexperiment_config.execution_minutes),
-        prometheus_base_url=subexperiment_config.prometheus_base_url,
-        reset=subexperiment_config.reset,
-        ns=subexperiment_config.namespace,
-        result_path=subexperiment_config.result_path,
-        configurations=subexperiment_config.configurations)
diff --git a/execution/strategies/tests/.gitignore b/execution/strategies/tests/.gitignore
deleted file mode 100644
index 1998c294f84ec0ff4b32396e4cd8e74e352672e6..0000000000000000000000000000000000000000
--- a/execution/strategies/tests/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.cache
\ No newline at end of file
diff --git a/execution/strategies/tests/__init__.py b/execution/strategies/tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/execution/strategies/tests/test_domain_restriction_binary_search_strategy.py b/execution/strategies/tests/test_domain_restriction_binary_search_strategy.py
deleted file mode 100644
index d93d4924cf09015c714604f2fc995e1db971e69d..0000000000000000000000000000000000000000
--- a/execution/strategies/tests/test_domain_restriction_binary_search_strategy.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import pprint
-
-from strategies.config import ExperimentConfig
-import strategies.strategies.search.binary_search_strategy as binary_search_strategy
-import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
-from strategies.experiment_execution import ExperimentExecutor
-import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
-
-class Object(object):
-    pass
-
-pp = pprint.PrettyPrinter(indent=4)
-
-dim_values = [0, 1, 2, 3, 4, 5, 6]
-replicass = [0, 1, 2, 3, 4, 5, 6]
-
-# True means the experiment was successful
-# The experiments are indexed row-wise (dimension values) and column-wise
-# (number of replicas), like ordinary arrays, from 0 to 6 respectively.
-# This means the first row starts with (0,0), the second row with (1,0) etc.
-successful = [
-       [ True , True , True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, False, True , True , True , True  ],
-       [ False, False, False, False, True , True , True  ],
-       [ False, False, False, False, False, False, True  ],
-       [ False, False, False, False, False, False, False ] 
-    ]
-
-expected_order = [
-        (0,3), # workload dim 0
-        (0,1), 
-        (0,0),
-        (1,3), # workload dim 1
-        (1,1),
-        (1,2),
-        (2,4), # workload dim 2
-        (2,2),
-        (3,4), # workload dim 3
-        (3,2),
-        (3,3),
-        (4,4), # workload dim 4
-        (4,3),
-        (5,5), # workload dim 5
-        (5,6),
-        (6,6) # workload dim 6
-    ]
-
-last_experiment = (0, 0)
-experiment_counter = -1
-subexperiment_executor = Object()
-
-def subexperiment_executor_executor(config):
-    global experiment_counter, last_experiment, pp
-    print("Simulate subexperiment with config:")
-    pp.pprint(config)
-    last_experiment = (config.dim_value, config.replicas)
-    experiment_counter += 1
-    print("Simulation complete")
-
-subexperiment_executor.execute = subexperiment_executor_executor
-
-
-# returns True if the experiment was successful
-
-subexperiment_evaluator = Object()
-
-def subexperiment_evaluator_execute(config, threshold):
-    print("Evaluating last experiment. Index was:")
-    global expected_order, experiment_counter, last_experiment, successful
-    pp.pprint(last_experiment)
-    print("Index was expected to be:")
-    pp.pprint(expected_order[experiment_counter])
-    assert expected_order[experiment_counter] == last_experiment
-    print("Index was as expected. Evaluation finished.")
-    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
-
-subexperiment_evaluator.execute = subexperiment_evaluator_execute
-
-def test_binary_search_strategy():
-    # declare parameters
-    uc="test-uc"
-    partitions=40
-    cpu_limit="1000m"
-    memory_limit="4Gi"
-    kafka_streams_commit_interval_ms=100
-    execution_minutes=5
-
-    # execute
-    # Note: placeholder values (Prometheus URL, namespace, paths, threshold)
-    # are only needed to satisfy ExperimentConfig; the stubs above ignore them.
-    experiment_config = ExperimentConfig(
-        exp_id="0",
-        use_case=uc,
-        dim_values=dim_values,
-        replicass=replicass,
-        partitions=partitions,
-        cpu_limit=cpu_limit,
-        memory_limit=memory_limit,
-        execution_minutes=execution_minutes,
-        prometheus_base_url="http://localhost:9090",
-        reset=False,
-        namespace="default",
-        result_path="results",
-        configurations={
-            'KAFKA_STREAMS_COMMIT_INTERVAL_MS': str(kafka_streams_commit_interval_ms)},
-        domain_restriction_strategy=lower_bound_strategy,
-        search_strategy=binary_search_strategy,
-        threshold=2000,
-        subexperiment_executor=subexperiment_executor,
-        subexperiment_evaluator=subexperiment_evaluator)
-    executor = ExperimentExecutor(experiment_config)
-    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_domain_restriction_check_all_strategy.py b/execution/strategies/tests/test_domain_restriction_check_all_strategy.py
deleted file mode 100644
index c15daca6ebab3171f0995c048afe56c0185efe56..0000000000000000000000000000000000000000
--- a/execution/strategies/tests/test_domain_restriction_check_all_strategy.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import pprint
-
-from strategies.config import ExperimentConfig
-import strategies.strategies.search.check_all_strategy as check_all_strategy
-import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
-from strategies.experiment_execution import ExperimentExecutor
-import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
-
-class Object(object):
-    pass
-
-pp = pprint.PrettyPrinter(indent=4)
-
-dim_values = [0, 1, 2, 3, 4, 5, 6]
-replicass = [0, 1, 2, 3, 4, 5, 6]
-
-# True means the experiment was successful
-# The experiments are indexed row-wise (dimension values) and column-wise
-# (number of replicas), like ordinary arrays, from 0 to 6 respectively.
-# This means the first row starts with (0,0), the second row with (1,0) etc.
-successful = [
-       [ True , True , True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, False, True , True , True , True  ],
-       [ False, False, False, False, True , True , True  ],
-       [ False, False, False, False, False, False, True  ],
-       [ False, False, False, False, False, False, False ] 
-    ]
-
-# the expected order of executed experiments
-expected_order = [
-        (0,0), # workload dim 0
-        (0,1),
-        (0,2),
-        (0,3),
-        (0,4),
-        (0,5),
-        (0,6),
-        (1,0), # workload dim 1
-        (1,1),
-        (1,2),
-        (1,3),
-        (1,4),
-        (1,5),
-        (1,6),
-        (2,2), # workload dim 2
-        (2,3),
-        (2,4),
-        (2,5),
-        (2,6),
-        (3,2), # workload dim 3
-        (3,3),
-        (3,4),
-        (3,5),
-        (3,6),
-        (4,3), # workload dim 4
-        (4,4),
-        (4,5),
-        (4,6),
-        (5,4), # workload dim 5
-        (5,5),
-        (5,6),
-        (6,6) # workload dim 6
-    ]
-
-last_experiment = (0, 0)
-experiment_counter = -1
-subexperiment_executor = Object()
-
-def subexperiment_executor_executor(config):
-    global experiment_counter, last_experiment, pp
-    print("Simulate subexperiment with config:")
-    pp.pprint(config)
-    last_experiment = (config.dim_value, config.replicas)
-    experiment_counter += 1
-    print("Simulation complete")
-
-subexperiment_executor.execute = subexperiment_executor_executor
-
-
-# returns True if the experiment was successful
-
-subexperiment_evaluator = Object()
-
-def subexperiment_evaluator_execute(config, threshold):
-    print("Evaluating last experiment. Index was:")
-    global expected_order, experiment_counter, last_experiment, successful
-    pp.pprint(expected_order[experiment_counter])
-    assert expected_order[experiment_counter] == last_experiment
-    print("Index was as expected. Evaluation finished.")
-    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
-
-subexperiment_evaluator.execute = subexperiment_evaluator_execute
-
-def test_check_all_strategy():
-    # declare parameters
-    uc="test-uc"
-    partitions=40
-    cpu_limit="1000m"
-    memory_limit="4Gi"
-    kafka_streams_commit_interval_ms=100
-    execution_minutes=5
-
-    # execute
-    # Note: placeholder values (Prometheus URL, namespace, paths, threshold)
-    # are only needed to satisfy ExperimentConfig; the stubs above ignore them.
-    experiment_config = ExperimentConfig(
-        exp_id="0",
-        use_case=uc,
-        dim_values=dim_values,
-        replicass=replicass,
-        partitions=partitions,
-        cpu_limit=cpu_limit,
-        memory_limit=memory_limit,
-        execution_minutes=execution_minutes,
-        prometheus_base_url="http://localhost:9090",
-        reset=False,
-        namespace="default",
-        result_path="results",
-        configurations={
-            'KAFKA_STREAMS_COMMIT_INTERVAL_MS': str(kafka_streams_commit_interval_ms)},
-        domain_restriction_strategy=lower_bound_strategy,
-        search_strategy=check_all_strategy,
-        threshold=2000,
-        subexperiment_executor=subexperiment_executor,
-        subexperiment_evaluator=subexperiment_evaluator)
-    executor = ExperimentExecutor(experiment_config)
-    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_domain_restriction_linear_search_strategy.py b/execution/strategies/tests/test_domain_restriction_linear_search_strategy.py
deleted file mode 100644
index 86e2cd29d187cb83166102c503ee79e5e1424573..0000000000000000000000000000000000000000
--- a/execution/strategies/tests/test_domain_restriction_linear_search_strategy.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import pprint
-
-from strategies.config import ExperimentConfig
-import strategies.strategies.search.linear_search_strategy as linear_search_strategy
-import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
-from strategies.experiment_execution import ExperimentExecutor
-import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
-
-class Object(object):
-    pass
-
-pp = pprint.PrettyPrinter(indent=4)
-
-dim_values = [0, 1, 2, 3, 4, 5, 6]
-replicass = [0, 1, 2, 3, 4, 5, 6]
-
-# True means the experiment was successful
-# The experiments are indexed row-wise (dimension values) and column-wise
-# (number of replicas), like ordinary arrays, from 0 to 6 respectively.
-# This means the first row starts with (0,0), the second row with (1,0) etc.
-successful = [
-       [ True , True , True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, False, True , True , True , True  ],
-       [ False, False, False, False, True , True , True  ],
-       [ False, False, False, False, False, False, True  ],
-       [ False, False, False, False, False, False, False ] 
-    ]
-
-# the expected order of executed experiments
-expected_order = [
-        (0,0),
-        (1,0),
-        (1,1),
-        (1,2),
-        (2,2),
-        (3,2),
-        (3,3),
-        (4,3),
-        (4,4),
-        (5,4),
-        (5,5),
-        (5,6),
-        (6,6)
-    ]
-
-last_experiment = (0, 0)
-experiment_counter = -1
-subexperiment_executor = Object()
-
-def subexperiment_executor_executor(config):
-    global experiment_counter, last_experiment, pp
-    print("Simulate subexperiment with config:")
-    pp.pprint(config)
-    last_experiment = (config.dim_value, config.replicas)
-    experiment_counter += 1
-    print("Simulation complete")
-
-subexperiment_executor.execute = subexperiment_executor_executor
-
-
-# returns True if the experiment was successful
-
-subexperiment_evaluator = Object()
-
-def subexperiment_evaluator_execute(config, threshold):
-    print("Evaluating last experiment. Index was:")
-    global expected_order, experiment_counter, last_experiment, successful
-    pp.pprint(expected_order[experiment_counter])
-    assert expected_order[experiment_counter] == last_experiment
-    print("Index was as expected. Evaluation finished.")
-    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
-
-subexperiment_evaluator.execute = subexperiment_evaluator_execute
-
-def test_linear_search_strategy():
-    # declare parameters
-    uc="test-uc"
-    partitions=40
-    cpu_limit="1000m"
-    memory_limit="4Gi"
-    kafka_streams_commit_interval_ms=100
-    execution_minutes=5
-
-    # execute
-    # Note: placeholder values (Prometheus URL, namespace, paths, threshold)
-    # are only needed to satisfy ExperimentConfig; the stubs above ignore them.
-    experiment_config = ExperimentConfig(
-        exp_id="0",
-        use_case=uc,
-        dim_values=dim_values,
-        replicass=replicass,
-        partitions=partitions,
-        cpu_limit=cpu_limit,
-        memory_limit=memory_limit,
-        execution_minutes=execution_minutes,
-        prometheus_base_url="http://localhost:9090",
-        reset=False,
-        namespace="default",
-        result_path="results",
-        configurations={
-            'KAFKA_STREAMS_COMMIT_INTERVAL_MS': str(kafka_streams_commit_interval_ms)},
-        domain_restriction_strategy=lower_bound_strategy,
-        search_strategy=linear_search_strategy,
-        threshold=2000,
-        subexperiment_executor=subexperiment_executor,
-        subexperiment_evaluator=subexperiment_evaluator)
-    executor = ExperimentExecutor(experiment_config)
-    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_no_restriction_binary_search_strategy.py b/execution/strategies/tests/test_no_restriction_binary_search_strategy.py
deleted file mode 100644
index 4f5da89cc72edd792015763539c9af4677772a79..0000000000000000000000000000000000000000
--- a/execution/strategies/tests/test_no_restriction_binary_search_strategy.py
+++ /dev/null
@@ -1,110 +0,0 @@
-import pprint
-
-from strategies.config import ExperimentConfig
-import strategies.strategies.search.binary_search_strategy as binary_search_strategy
-import strategies.strategies.domain_restriction.no_lower_bound_strategy as no_lower_bound_strategy
-from strategies.experiment_execution import ExperimentExecutor
-import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
-
-class Object(object):
-    pass
-
-pp = pprint.PrettyPrinter(indent=4)
-
-dim_values = [0, 1, 2, 3, 4, 5, 6]
-replicass = [0, 1, 2, 3, 4, 5, 6]
-
-# True means the experiment was successful
-# The experiments are indexed row-wise (dimension values) and column-wise
-# (number of replicas), like ordinary arrays, from 0 to 6 respectively.
-# This means the first row starts with (0,0), the second row with (1,0) etc.
-successful = [
-       [ True , True , True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, False, True , True , True , True  ],
-       [ False, False, False, False, True , True , True  ],
-       [ False, False, False, False, False, False, True  ],
-       [ False, False, False, False, False, False, False ] 
-    ]
-
-expected_order = [
-        (0,3), # workload dim 0
-        (0,1), 
-        (0,0),
-        (1,3), # workload dim 1
-        (1,1),
-        (1,2),
-        (2,3), # workload dim 2
-        (2,1),
-        (2,2),
-        (3,3), # workload dim 3
-        (3,1),
-        (3,2),
-        (4,3), # workload dim 4
-        (4,5),
-        (4,4),
-        (5,3), # workload dim 5
-        (5,5),
-        (5,6),
-        (6,3), # workload dim 6
-        (6,5),
-        (6,6)
-    ]
-
-last_experiment = (0, 0)
-experiment_counter = -1
-subexperiment_executor = Object()
-
-def subexperiment_executor_executor(config):
-    global experiment_counter, last_experiment, pp
-    print("Simulate subexperiment with config:")
-    pp.pprint(config)
-    last_experiment = (config.dim_value, config.replicas)
-    experiment_counter += 1
-    print("Simulation complete")
-
-subexperiment_executor.execute = subexperiment_executor_executor
-
-
-# returns True if the experiment was successful
-
-subexperiment_evaluator = Object()
-
-def subexperiment_evaluator_execute(config, threshold):
-    print("Evaluating last experiment. Index was:")
-    global expected_order, experiment_counter, last_experiment, successful
-    pp.pprint(last_experiment)
-    print("Index was expected to be:")
-    pp.pprint(expected_order[experiment_counter])
-    assert expected_order[experiment_counter] == last_experiment
-    print("Index was as expected. Evaluation finished.")
-    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
-
-subexperiment_evaluator.execute = subexperiment_evaluator_execute
-
-def test_binary_search_strategy():
-    # declare parameters
-    uc="test-uc"
-    partitions=40
-    cpu_limit="1000m"
-    memory_limit="4Gi"
-    kafka_streams_commit_interval_ms=100
-    execution_minutes=5
-
-    # execute
-    # Note: placeholder values (Prometheus URL, namespace, paths, threshold)
-    # are only needed to satisfy ExperimentConfig; the stubs above ignore them.
-    experiment_config = ExperimentConfig(
-        exp_id="0",
-        use_case=uc,
-        dim_values=dim_values,
-        replicass=replicass,
-        partitions=partitions,
-        cpu_limit=cpu_limit,
-        memory_limit=memory_limit,
-        execution_minutes=execution_minutes,
-        prometheus_base_url="http://localhost:9090",
-        reset=False,
-        namespace="default",
-        result_path="results",
-        configurations={
-            'KAFKA_STREAMS_COMMIT_INTERVAL_MS': str(kafka_streams_commit_interval_ms)},
-        domain_restriction_strategy=no_lower_bound_strategy,
-        search_strategy=binary_search_strategy,
-        threshold=2000,
-        subexperiment_executor=subexperiment_executor,
-        subexperiment_evaluator=subexperiment_evaluator)
-    executor = ExperimentExecutor(experiment_config)
-    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_no_restriction_check_all_strategy.py b/execution/strategies/tests/test_no_restriction_check_all_strategy.py
deleted file mode 100644
index f173a3d168704cc7a499933984b6510ebda2751e..0000000000000000000000000000000000000000
--- a/execution/strategies/tests/test_no_restriction_check_all_strategy.py
+++ /dev/null
@@ -1,137 +0,0 @@
-import pprint
-
-from strategies.config import ExperimentConfig
-import strategies.strategies.search.check_all_strategy as check_all_strategy
-import strategies.strategies.domain_restriction.no_lower_bound_strategy as no_lower_bound_strategy
-from strategies.experiment_execution import ExperimentExecutor
-import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
-
-class Object(object):
-    pass
-
-pp = pprint.PrettyPrinter(indent=4)
-
-dim_values = [0, 1, 2, 3, 4, 5, 6]
-replicass = [0, 1, 2, 3, 4, 5, 6]
-
-# True means the experiment was successful
-# The experiments are indexed row-wise (dimension values) and column-wise
-# (number of replicas), like ordinary arrays, from 0 to 6 respectively.
-# This means the first row starts with (0,0), the second row with (1,0) etc.
-successful = [
-       [ True , True , True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, False, True , True , True , True  ],
-       [ False, False, False, False, True , True , True  ],
-       [ False, False, False, False, False, False, True  ],
-       [ False, False, False, False, False, False, False ] 
-    ]
-
-# the expected order of executed experiments
-expected_order = [
-        (0,0), # workload dim 0
-        (0,1),
-        (0,2),
-        (0,3),
-        (0,4),
-        (0,5),
-        (0,6),
-        (1,0), # workload dim 1
-        (1,1),
-        (1,2),
-        (1,3),
-        (1,4),
-        (1,5),
-        (1,6),
-        (2,0), # workload dim 2
-        (2,1),
-        (2,2), 
-        (2,3),
-        (2,4),
-        (2,5),
-        (2,6),
-        (3,0), # workload dim 3
-        (3,1),
-        (3,2), 
-        (3,3),
-        (3,4),
-        (3,5),
-        (3,6),
-        (4,0), # workload dim 4
-        (4,1),
-        (4,2), 
-        (4,3),
-        (4,4),
-        (4,5),
-        (4,6),
-        (5,0), # workload dim 5
-        (5,1),
-        (5,2), 
-        (5,3),
-        (5,4),
-        (5,5),
-        (5,6),
-        (6,0), # workload dim 6
-        (6,1),
-        (6,2), 
-        (6,3),
-        (6,4),
-        (6,5),
-        (6,6),
-    ]
-
-last_experiment = (0, 0)
-experiment_counter = -1
-subexperiment_executor = Object()
-
-def subexperiment_executor_executor(config):
-    global experiment_counter, last_experiment, pp
-    print("Simulate subexperiment with config:")
-    pp.pprint(config)
-    last_experiment = (config.dim_value, config.replicas)
-    experiment_counter += 1
-    print("Simulation complete")
-
-subexperiment_executor.execute = subexperiment_executor_executor
-
-
-# returns True if the experiment was successful
-
-subexperiment_evaluator = Object()
-
-def subexperiment_evaluator_execute(config, threshold):
-    print("Evaluating last experiment. Index was:")
-    global expected_order, experiment_counter, last_experiment, successful
-    pp.pprint(expected_order[experiment_counter])
-    assert expected_order[experiment_counter] == last_experiment
-    print("Index was as expected. Evaluation finished.")
-    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
-
-subexperiment_evaluator.execute = subexperiment_evaluator_execute
-
-def test_check_all_strategy():
-    # declare parameters
-    uc="test-uc"
-    partitions=40
-    cpu_limit="1000m"
-    memory_limit="4Gi"
-    kafka_streams_commit_interval_ms=100
-    execution_minutes=5
-
-    # execute
-    # Note: placeholder values (Prometheus URL, namespace, paths, threshold)
-    # are only needed to satisfy ExperimentConfig; the stubs above ignore them.
-    experiment_config = ExperimentConfig(
-        exp_id="0",
-        use_case=uc,
-        dim_values=dim_values,
-        replicass=replicass,
-        partitions=partitions,
-        cpu_limit=cpu_limit,
-        memory_limit=memory_limit,
-        execution_minutes=execution_minutes,
-        prometheus_base_url="http://localhost:9090",
-        reset=False,
-        namespace="default",
-        result_path="results",
-        configurations={
-            'KAFKA_STREAMS_COMMIT_INTERVAL_MS': str(kafka_streams_commit_interval_ms)},
-        domain_restriction_strategy=no_lower_bound_strategy,
-        search_strategy=check_all_strategy,
-        threshold=2000,
-        subexperiment_executor=subexperiment_executor,
-        subexperiment_evaluator=subexperiment_evaluator)
-    executor = ExperimentExecutor(experiment_config)
-    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_no_restriction_linear_search_strategy.py b/execution/strategies/tests/test_no_restriction_linear_search_strategy.py
deleted file mode 100644
index 0e47c2e95b75ae682e82a02ad3d0a91c5a62f253..0000000000000000000000000000000000000000
--- a/execution/strategies/tests/test_no_restriction_linear_search_strategy.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import pprint
-
-from strategies.config import ExperimentConfig
-import strategies.strategies.search.linear_search_strategy as linear_search_strategy
-import strategies.strategies.domain_restriction.no_lower_bound_strategy as no_lower_bound_strategy
-from strategies.experiment_execution import ExperimentExecutor
-import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
-
-class Object(object):
-    pass
-
-pp = pprint.PrettyPrinter(indent=4)
-
-dim_values = [0, 1, 2, 3, 4, 5, 6]
-replicass = [0, 1, 2, 3, 4, 5, 6]
-
-# True means the experiment was successful
-# the experiments are indexed row (representing dimension values) and column (representing number of replicas) wise as usual arrays from 0 - 6 respectively.
-# this means the first row starts with (0,0), the second row with (1, 0) etc.
-successful = [
-       [ True , True , True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, False, True , True , True , True  ],
-       [ False, False, False, False, True , True , True  ],
-       [ False, False, False, False, False, False, True  ],
-       [ False, False, False, False, False, False, False ] 
-    ]
-
-# the expected order of executed experiments
-expected_order = [
-        (0,0), # workload dim 0
-        (1,0), # workload dim 1
-        (1,1),
-        (1,2),
-        (2,0), # workload dim 2
-        (2,1),
-        (2,2),
-        (3,0), # workload dim 3
-        (3,1),
-        (3,2),
-        (3,3),
-        (4,0), # workload dim 4
-        (4,1),
-        (4,2),
-        (4,3),
-        (4,4),
-        (5,0), # workload dim 5
-        (5,1),
-        (5,2),
-        (5,3),
-        (5,4),
-        (5,5),
-        (5,6),
-        (6,0), # workload dim 6
-        (6,1),
-        (6,2),
-        (6,3),
-        (6,4),
-        (6,5),
-        (6,6)
-    ]
-
-last_experiment = (0, 0)
-experiment_counter = -1
-subexperiment_executor = Object()
-
-def subexperiment_executor_executor(config):
-    global experiment_counter, last_experiment, pp
-    print("Simulate subexperiment with config:")
-    pp.pprint(config)
-    last_experiment = (config.dim_value, config.replicas)
-    experiment_counter += 1
-    print("Simulation complete")
-
-subexperiment_executor.execute = subexperiment_executor_executor
-
-
-# returns True if the experiment was successful
-
-subexperiment_evaluator = Object()
-
-def subexperiment_evaluator_execute(i):
-    print("Evaluating last experiment. Index was:")
-    global expected_order, experiment_counter, last_experiment, successful
-    pp.pprint(expected_order[experiment_counter])
-    assert expected_order[experiment_counter] == last_experiment
-    print("Index was as expected. Evaluation finished.")
-    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
-
-subexperiment_evaluator.execute = subexperiment_evaluator_execute
-
-def test_linear_search_strategy():
-    # declare parameters
-    uc="test-uc"
-    partitions=40
-    cpu_limit="1000m"
-    memory_limit="4Gi"
-    kafka_streams_commit_interval_ms=100
-    execution_minutes=5
-
-    # execute
-    experiment_config = ExperimentConfig(
-        exp_id="0",
-        use_case=uc,
-        dim_values=dim_values,
-        replicass=replicass,
-        partitions=partitions,
-        cpu_limit=cpu_limit,
-        memory_limit=memory_limit,
-        kafka_streams_commit_interval_ms=kafka_streams_commit_interval_ms,
-        execution_minutes=execution_minutes,
-        domain_restriction_strategy=no_lower_bound_strategy,
-        search_strategy=linear_search_strategy,
-        subexperiment_executor=subexperiment_executor,
-        subexperiment_evaluator=subexperiment_evaluator)
-    executor = ExperimentExecutor(experiment_config)
-    executor.execute()
\ No newline at end of file
diff --git a/execution/theodolite.py b/execution/theodolite.py
deleted file mode 100755
index bd273c4405e2a406b5b5537e084957625c19aa96..0000000000000000000000000000000000000000
--- a/execution/theodolite.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-from lib.cli_parser import benchmark_parser
-import logging  # logging
-import os
-import run_uc
-import sys
-from strategies.config import ExperimentConfig
-import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
-import strategies.strategies.domain_restriction.no_lower_bound_strategy as no_lower_bound_strategy
-import strategies.strategies.search.check_all_strategy as check_all_strategy
-import strategies.strategies.search.linear_search_strategy as linear_search_strategy
-import strategies.strategies.search.binary_search_strategy as binary_search_strategy
-from strategies.experiment_execution import ExperimentExecutor
-import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
-import strategies.subexperiment_evaluation.subexperiment_evaluator as subexperiment_evaluator
-
-
-def load_variables():
-    """Load the CLI variables given at the command line"""
-    print('Load CLI variables')
-    parser = benchmark_parser("Run theodolite benchmarking")
-    args = parser.parse_args()
-    print(args)
-    if (args.uc is None or args.loads is None or args.instances_list is None) and not args.reset_only:
-        print('The options --uc, --loads and --instances are mandatory.')
-        print('Some might not be set!')
-        sys.exit(1)
-    return args
-
-
-def main(uc, loads, instances_list, partitions, cpu_limit, memory_limit,
-         duration, domain_restriction, search_strategy, threshold,
-         prometheus_base_url, reset, namespace, result_path, configurations):
-
-    print(
-        f"Domain restriction of search space activated: {domain_restriction}")
-    print(f"Chosen search strategy: {search_strategy}")
-
-    counter_path = f"{result_path}/exp_counter.txt"
-
-    if os.path.exists(counter_path):
-        with open(counter_path, mode="r") as read_stream:
-            exp_id = int(read_stream.read())
-    else:
-        exp_id = 0
-        # Create the directory if not exists
-        os.makedirs(result_path, exist_ok=True)
-
-    # Store metadata
-    separator = ","
-    lines = [
-        f'UC={uc}\n',
-        f'DIM_VALUES={separator.join(map(str, loads))}\n',
-        f'REPLICAS={separator.join(map(str, instances_list))}\n',
-        f'PARTITIONS={partitions}\n',
-        f'CPU_LIMIT={cpu_limit}\n',
-        f'MEMORY_LIMIT={memory_limit}\n',
-        f'EXECUTION_MINUTES={duration}\n',
-        f'DOMAIN_RESTRICTION={domain_restriction}\n',
-        f'SEARCH_STRATEGY={search_strategy}\n',
-        f'CONFIGURATIONS={configurations}'
-    ]
-    with open(f"{result_path}/exp{exp_id}_uc{uc}_meta.txt", "w") as stream:
-        stream.writelines(lines)
-
-    with open(counter_path, mode="w") as write_stream:
-        write_stream.write(str(exp_id + 1))
-
-    domain_restriction_strategy = None
-    search_strategy_method = None
-
-    # Select domain restriction
-    if domain_restriction:
-        # domain restriction
-        domain_restriction_strategy = lower_bound_strategy
-    else:
-        # no domain restriction
-        domain_restriction_strategy = no_lower_bound_strategy
-
-    # select search strategy
-    if search_strategy == "linear-search":
-        print(
-            f"Going to execute at most {len(loads)+len(instances_list)-1} subexperiments in total..")
-        search_strategy_method = linear_search_strategy
-    elif search_strategy == "binary-search":
-        search_strategy_method = binary_search_strategy
-    else:
-        print(
-            f"Going to execute {len(loads)*len(instances_list)} subexperiments in total..")
-        search_strategy_method = check_all_strategy
-
-    experiment_config = ExperimentConfig(
-        use_case=uc,
-        exp_id=exp_id,
-        dim_values=loads,
-        replicass=instances_list,
-        partitions=partitions,
-        cpu_limit=cpu_limit,
-        memory_limit=memory_limit,
-        execution_minutes=duration,
-        prometheus_base_url=prometheus_base_url,
-        reset=reset,
-        namespace=namespace,
-        configurations=configurations,
-        result_path=result_path,
-        domain_restriction_strategy=domain_restriction_strategy,
-        search_strategy=search_strategy_method,
-        threshold=threshold,
-        subexperiment_executor=subexperiment_executor,
-        subexperiment_evaluator=subexperiment_evaluator)
-
-    executor = ExperimentExecutor(experiment_config)
-    executor.execute()
-
-
-if __name__ == '__main__':
-    logging.basicConfig(level=logging.INFO)
-    args = load_variables()
-    if args.reset_only:
-        print('Only reset the cluster')
-        run_uc.main(None, None, None, None, None, None, None, None, None,
-                    None, args.namespace, None, None, reset_only=True)
-    else:
-        main(args.uc, args.loads, args.instances_list, args.partitions,
-             args.cpu_limit, args.memory_limit, args.duration,
-             args.domain_restriction, args.search_strategy,
-             args.threshold, args.prometheus, args.reset, args.namespace,
-             args.path, args.configurations)
diff --git a/execution/theodolite.yaml b/execution/theodolite.yaml
index 06d14a0f589b2ac7a16ebaaae4d1490b840ea57b..ae18a68ee61c71e20008a71537357cdf9521216a 100644
--- a/execution/theodolite.yaml
+++ b/execution/theodolite.yaml
@@ -5,47 +5,60 @@ metadata:
 spec:
   template:
     spec:
-      volumes:
-      - name: theodolite-pv-storage
-        persistentVolumeClaim:
-          claimName: theodolite-pv-claim
+      securityContext:
+        runAsUser: 0 # Run as root to grant write access to the volumes.
       containers:
+        - name: lag-analysis
+          image: ghcr.io/cau-se/theodolite-slo-checker-lag-trend:latest
+          ports:
+          - containerPort: 80
+            name: analysis
         - name: theodolite
           image: ghcr.io/cau-se/theodolite:latest
-          # imagePullPolicy: Never # Used to pull "own" local image
+          imagePullPolicy: Always
           env:
-            - name: UC # mandatory
-              value: "1"
-            - name: LOADS # mandatory
-              value: "100000, 200000"
-            - name: INSTANCES # mandatory
-              value: "1, 2, 3"
-            # - name: DURATION
-            #   value: "5"
-            # - name: PARTITIONS
-            #   value: "40"
-            # - name: DOMAIN_RESTRICTION
-            #   value: "True"
-            # - name: SEARCH_STRATEGY
-            #   value: "linear-search"
-            # - name: CPU_LIMIT
-            #   value: "1000m"
-            # - name: MEMORY_LIMIT
-            #   value: "4Gi"
-            - name: PROMETHEUS_BASE_URL
-              value: "http://prometheus-operated:9090"
-            # - name: NAMESPACE
-            #   value: "default"
-            # - name: CONFIGURATIONS
-            #   value: "COMMIT_INTERVAL_MS=100, NUM_STREAM_THREADS=1"
-            - name: RESULT_PATH
-              value: "results"
-            - name: PYTHONUNBUFFERED # Enable logs in Kubernetes
-              value: "1"
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+
+            # - name: MODE
+            #   value: yaml-executor # Default is `yaml-executor`
+            - name: THEODOLITE_EXECUTION
+              value: "execution/execution.yaml" # The name of this file must correspond to the filename of the execution, from which the config map is created.
+            - name: THEODOLITE_BENCHMARK
+              value: "benchmark/benchmark.yaml" # The name of this file must correspond to the filename of the benchmark, from which the config map is created.
+            - name: THEODOLITE_APP_RESOURCES
+              value: "benchmark-resources"
+            - name: RESULTS_FOLDER # Folder for saving results
+              value: results # Default is the working directory (/deployments)
+            # - name: CREATE_RESULTS_FOLDER # Specifies whether the results folder should be created if it does not exist.
+            #   value: "false" # Default is false.
           volumeMounts:
-            - mountPath: "/app/results"
+            - mountPath: "/deployments/results" # the mounted path must corresponds to the value of `RESULT_FOLDER`.
               name: theodolite-pv-storage
+            - mountPath: "/deployments/benchmark-resources" # must correspond to the value of `THEODOLITE_APP_RESOURCES`.
+              name: benchmark-resources
+            - mountPath: "/deployments/benchmark"  # must correspond to the value of `THEODOLITE_BENCHMARK`.
+              name: benchmark
+            - mountPath: "/deployments/execution" # must correspond to the value of `THEODOLITE_EXECUTION`.
+              name: execution
       restartPolicy: Never
       # Uncomment if RBAC is enabled and configured
-      # serviceAccountName: theodolite
-  backoffLimit: 4
+      serviceAccountName: theodolite
+      # Multiple volumes are needed to provide the corresponding files.
+      # The names must correspond to the created configmaps and the volumeMounts.
+      volumes:
+        - name: theodolite-pv-storage
+          persistentVolumeClaim:
+            claimName: theodolite-pv-claim
+        - name: benchmark-resources
+          configMap:
+            name: benchmark-resources-configmap
+        - name: benchmark
+          configMap:
+            name: benchmark-configmap
+        - name: execution
+          configMap:
+            name: execution-configmap
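+      # A sketch of how the referenced config maps could be created beforehand
+      # (assuming local files and folders with these names exist):
+      #   kubectl create configmap benchmark-resources-configmap --from-file=benchmark-resources/
+      #   kubectl create configmap benchmark-configmap --from-file=benchmark.yaml
+      #   kubectl create configmap execution-configmap --from-file=execution.yaml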
+  backoffLimit: 4
\ No newline at end of file
diff --git a/execution/uc-application/aggregation-deployment.yaml b/execution/uc-application/aggregation-deployment.yaml
deleted file mode 100644
index 07732ca1dd1e6b2b06f098dfb10a53d38e8d5cae..0000000000000000000000000000000000000000
--- a/execution/uc-application/aggregation-deployment.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-aggregation
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-aggregation
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-aggregation
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: uc-application
-        image: uc-app:latest
-        ports:
-        - containerPort: 5555
-          name: jmx
-        env:
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
-        - name: JAVA_OPTS
-          value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
-        - name: COMMIT_INTERVAL_MS # Set as default for the applications
-          value: "100"
-        resources:
-          limits:
-            memory: 4Gi
-            cpu: 1000m
-      - name: prometheus-jmx-exporter
-        image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
-        command:
-          - java
-          - -XX:+UnlockExperimentalVMOptions
-          - -XX:+UseCGroupMemoryLimitForHeap
-          - -XX:MaxRAMFraction=1
-          - -XshowSettings:vm
-          - -jar
-          - jmx_prometheus_httpserver.jar
-          - "5556"
-          - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
-        ports:
-          - containerPort: 5556
-        volumeMounts:
-          - name: jmx-config
-            mountPath: /etc/jmx-aggregation
-      volumes:
-        - name: jmx-config
-          configMap:
-            name: aggregation-jmx-configmap
diff --git a/execution/uc-workload-generator/workloadGenerator.yaml b/execution/uc-workload-generator/workloadGenerator.yaml
deleted file mode 100644
index 146e285f66d4c0e1a88d613e4ac2d5571234fad6..0000000000000000000000000000000000000000
--- a/execution/uc-workload-generator/workloadGenerator.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-load-generator
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-load-generator
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-load-generator
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: workload-generator
-        image: workload-generator:latest
-        ports:
-        - containerPort: 5701
-          name: coordination
-        env:
-        # Order need to be preserved for run_uc.py
-        - name: NUM_SENSORS
-          value: "25000"
-        - name: NUM_NESTED_GROUPS
-          value: "5"
-        - name: KUBERNETES_NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        - name: KUBERNETES_DNS_NAME
-          value: "titan-ccp-load-generator.$(KUBERNETES_NAMESPACE).svc.cluster.local"
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
diff --git a/helm/.gitignore b/helm/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..80bf7fc709ac6d08e703fe9f24d7d5776e26830e
--- /dev/null
+++ b/helm/.gitignore
@@ -0,0 +1 @@
+charts
\ No newline at end of file
diff --git a/helm/.helmignore b/helm/.helmignore
new file mode 100644
index 0000000000000000000000000000000000000000..0e8a0eb36f4ca2c939201c0d54b5d82a1ea34778
--- /dev/null
+++ b/helm/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/helm/Chart.yaml b/helm/Chart.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0e56a156832ed6f9159f436ec63f825d132e8dd3
--- /dev/null
+++ b/helm/Chart.yaml
@@ -0,0 +1,34 @@
+apiVersion: v2
+name: theodolite
+description: Theodolite is a framework for benchmarking the scalability of stream processing engines.
+home: https://cau-se.github.io/theodolite
+sources:
+  - https://github.com/cau-se/theodolite
+maintainers:
+- name: Sören Henning
+  email: soeren.henning@email.uni-kiel.de
+  url: https://www.se.informatik.uni-kiel.de/en/team/soeren-henning-m-sc
+
+type: application
+
+dependencies:
+  - name: grafana
+    version: 6.17.5
+    repository: https://grafana.github.io/helm-charts
+    condition: grafana.enabled
+  - name: kube-prometheus-stack
+    version: 20.0.1
+    repository: https://prometheus-community.github.io/helm-charts
+    condition: kube-prometheus-stack.enabled
+  - name: cp-helm-charts
+    version: 0.6.0
+    repository: https://soerenhenning.github.io/cp-helm-charts
+    condition: cp-helm-charts.enabled
+  - name: kafka-lag-exporter
+    version: 0.6.7
+    repository: https://lightbend.github.io/kafka-lag-exporter/repo/
+    condition: kafka-lag-exporter.enabled
+
+version: 0.6.0-SNAPSHOT
+
+appVersion: 0.6.0-SNAPSHOT
diff --git a/helm/README.md b/helm/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..af253482bcbacf628fd718eb70b3b157cc06e3f8
--- /dev/null
+++ b/helm/README.md
@@ -0,0 +1,82 @@
+# Theodolite Helm Chart
+
+## Installation
+
+The Theodolite Helm chart with all its dependencies can be installed via:
+
+```sh
+helm dependencies update .
+helm install theodolite .
+```
+
+**Hint for Windows users:** The Theodolite Helm chart makes use of some symbolic links. These are not properly created when this repository is checked out on Windows. There are a couple of solutions presented in this [Stack Overflow post](https://stackoverflow.com/q/5917249/4121056). A simpler workaround is to manually delete the symbolic links and replace them with the files and folders they point to. The relevant symbolic links are `benchmark-definitions` and the files inside `crds`.
+
+## Customize Installation
+
+As usual, the installation with Helm can be configured by passing a values YAML file:
+
+```sh
+helm install theodolite . -f <your-config.yaml>
+```
+
+We provide a minimal configuration, especially suited for development environments, with the `preconfigs/minimal.yaml`
+file.
+
+By default, Helm installs the Theodolite CRDs used for the operator. If Theodolite will not be used as an operator or
+if the CRDs are already installed, you can skip their installation by adding the flag `--skip-crds`.
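+
+For example, a minimal development installation that skips the CRDs can combine both options (a sketch; adjust the release name as needed):
+
+```sh
+helm install theodolite . -f preconfigs/minimal.yaml --skip-crds
+```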
+
+## Test Installation
+
+Test the installation with:
+
+```sh
+helm test theodolite
+```
+
+Our test files are located [here](templates/tests). Many subcharts have their own tests, which are also executed.
+Please note: If a test fails, Helm will stop testing.
+
+## Uninstall this Chart
+
+The Theodolite Helm chart can easily be removed with:
+
+```sh
+helm uninstall theodolite
+```
+
+Helm does not remove any CRDs created by this chart. You can remove them manually with:
+
+```sh
+# CRDs from Theodolite
+kubectl delete crd executions.theodolite.com
+kubectl delete crd benchmarks.theodolite.com
+# CRDs from Prometheus operator (see https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#uninstall-chart)
+kubectl delete crd alertmanagerconfigs.monitoring.coreos.com
+kubectl delete crd alertmanagers.monitoring.coreos.com
+kubectl delete crd podmonitors.monitoring.coreos.com
+kubectl delete crd probes.monitoring.coreos.com
+kubectl delete crd prometheuses.monitoring.coreos.com
+kubectl delete crd prometheusrules.monitoring.coreos.com
+kubectl delete crd servicemonitors.monitoring.coreos.com
+kubectl delete crd thanosrulers.monitoring.coreos.com
+```
+
+## Development
+
+### Dependencies
+
+The following 3rd party charts are used by Theodolite:
+
+- Kube Prometheus Stack (to install the Prometheus Operator, which is used to create Prometheus instances)
+- Grafana (including a dashboard and a data source configuration)
+- Confluent Platform (for Kafka and Zookeeper)
+- Kafka Lag Exporter (used to collect Kafka lag monitoring data)
+
+### Hints
+
+#### Grafana
+
+Grafana ConfigMaps contain expressions like `{{ topic }}`. Helm uses the same syntax for template functions. More information can be found [here](https://github.com/helm/helm/issues/2798).
+  - Escape the opening braces: `{{ "{{" }} topic }}`
+  - Let Helm render the template as a raw string: ``{{ `{{ <config>}}` }}``
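+
+For example, a minimal sketch of the raw-string style, as used by the dashboard ConfigMaps in this chart (the `{{topic}}` placeholder reaches Grafana unrendered):
+
+```yaml
+data:
+  dashboard.json: |-
+    {{`{
+    "legendFormat": "{{topic}}"
+    }`}}
+```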
\ No newline at end of file
diff --git a/helm/benchmark-definitions b/helm/benchmark-definitions
new file mode 120000
index 0000000000000000000000000000000000000000..e25d86a1f35f9815225c23d78b8524f9df81f9b5
--- /dev/null
+++ b/helm/benchmark-definitions
@@ -0,0 +1 @@
+../theodolite-benchmarks/definitions/
\ No newline at end of file
diff --git a/helm/build-package.sh b/helm/build-package.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e79d0497d883a8e1e0fab56ddeeb8d4ee1053648
--- /dev/null
+++ b/helm/build-package.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env sh
+
+helm package . --dependency-update && rm -r charts # We don't want to include dependencies in our index
diff --git a/helm/crds/benchmark.yaml b/helm/crds/benchmark.yaml
new file mode 120000
index 0000000000000000000000000000000000000000..fb100de7a1407462bfb6488a54b7f70014a58474
--- /dev/null
+++ b/helm/crds/benchmark.yaml
@@ -0,0 +1 @@
+./../../theodolite/crd/crd-benchmark.yaml
\ No newline at end of file
diff --git a/helm/crds/execution.yaml b/helm/crds/execution.yaml
new file mode 120000
index 0000000000000000000000000000000000000000..62d268c23c391cd7bbfbaffeaee8af1697dc446a
--- /dev/null
+++ b/helm/crds/execution.yaml
@@ -0,0 +1 @@
+./../../theodolite/crd/crd-execution.yaml
\ No newline at end of file
diff --git a/helm/preconfigs/minimal.yaml b/helm/preconfigs/minimal.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b0828c2f424e8456933dc626a66a199cd60aa5da
--- /dev/null
+++ b/helm/preconfigs/minimal.yaml
@@ -0,0 +1,12 @@
+cp-helm-charts:
+  cp-zookeeper:
+    servers: 1
+
+  cp-kafka:
+    brokers: 1
+    configurationOverrides:
+      offsets.topic.replication.factor: "1"
+
+operator:
+  resultsVolume:
+    enabled: false
diff --git a/helm/preconfigs/oci.yaml b/helm/preconfigs/oci.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..35fe5dcf423eed77cb2d3d4298088738125fa9fe
--- /dev/null
+++ b/helm/preconfigs/oci.yaml
@@ -0,0 +1,6 @@
+operator:
+  resultsVolume:
+    persistent:
+      enabled: true
+      storageClassName: "oci-bv"
+      size: 50Gi # Minimum size in OCI
diff --git a/helm/templates/NOTES.txt b/helm/templates/NOTES.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ef1eea71080f55d08e193b9741327189865fa3dd
--- /dev/null
+++ b/helm/templates/NOTES.txt
@@ -0,0 +1,3 @@
+Welcome to Theodolite!
+
+Visit https://cau-se.github.io/theodolite to get started and for more information.
diff --git a/helm/templates/_helpers.tpl b/helm/templates/_helpers.tpl
new file mode 100644
index 0000000000000000000000000000000000000000..569cf454a950f3f621c23472f0346c8bbd52229d
--- /dev/null
+++ b/helm/templates/_helpers.tpl
@@ -0,0 +1,69 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "theodolite.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "theodolite.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "theodolite.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "theodolite.labels" -}}
+helm.sh/chart: {{ include "theodolite.chart" . }}
+{{ include "theodolite.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "theodolite.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "theodolite.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "theodolite.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "theodolite.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create the name of the results volume to use
+*/}}
+{{- define "theodolite.resultsClaimName" -}}
+{{- default (printf "%s-results" (include "theodolite.fullname" .)) .Values.operator.resultsVolume.persistent.existingClaim }}
+{{- end }}
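+
+{{/*
+Usage sketch (hypothetical consumer template): the helpers above are referenced
+via `include`, e.g. in a PersistentVolumeClaim:
+  metadata:
+    name: {{ include "theodolite.resultsClaimName" . }}
+    labels: {{- include "theodolite.labels" . | nindent 6 }}
+*/}}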
diff --git a/helm/templates/grafana/dashboard-config-map.yaml b/helm/templates/grafana/dashboard-config-map.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0df01b20efa0fb1100fe4b7289b00b3058eb032f
--- /dev/null
+++ b/helm/templates/grafana/dashboard-config-map.yaml
@@ -0,0 +1,1007 @@
+{{- if .Values.grafana.enabled -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "theodolite.fullname" . }}-grafana-scalability
+  labels:
+    grafana_dashboard: "1"
+data:
+  k8s-dashboard.json: |-
+    {{`{
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": "-- Grafana --",
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "gnetId": null,
+    "graphTooltip": 0,
+    "id": 1,
+    "iteration": 1589140028684,
+    "links": [],
+    "panels": [
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 7,
+          "w": 12,
+          "x": 0,
+          "y": 0
+        },
+        "hiddenSeries": false,
+        "id": 2,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum(cp_kafka_server_brokertopicmetrics_messagesinpersec_topic_input)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{Messages In Per Second}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Messages In Per Second",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 7,
+          "w": 12,
+          "x": 12,
+          "y": 0
+        },
+        "hiddenSeries": false,
+        "id": 3,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum(cp_kafka_server_brokertopicmetrics_messagesinpersec_topic_output)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{Messages Out Per Second}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Messages Out Per Second",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 0,
+          "y": 7
+        },
+        "hiddenSeries": false,
+        "id": 9,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": true,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(group, topic) (kafka_consumergroup_group_lag >= 0)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Record Lag",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 12,
+          "y": 7
+        },
+        "hiddenSeries": false,
+        "id": 5,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "count(count (kafka_consumer_consumer_fetch_manager_metrics_records_lag) by(pod))",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "instances",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Number of Instances",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "decimals": 0,
+            "format": "short",
+            "label": "",
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 0,
+          "y": 15
+        },
+        "hiddenSeries": false,
+        "id": 10,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(group,topic) (kafka_consumergroup_group_offset >= 0)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Records Consumed",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 12,
+          "y": 15
+        },
+        "hiddenSeries": false,
+        "id": 12,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "count by(group,topic) (kafka_consumergroup_group_offset >= 0)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Number of Partitions",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 0,
+          "y": 23
+        },
+        "hiddenSeries": false,
+        "id": 11,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(group,topic) (kafka_partition_latest_offset)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Records Produced (Kafka Lag Exporter)",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 12,
+          "y": 23
+        },
+        "hiddenSeries": false,
+        "id": 8,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "count by(job, topic) (kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Number of Partitions (Kafka Streams Export)",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "decimals": null,
+            "format": "short",
+            "label": "",
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 0,
+          "y": 31
+        },
+        "hiddenSeries": false,
+        "id": 4,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(job, topic) (kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Record Lag (Kafka Streams Export)",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 12,
+          "y": 31
+        },
+        "hiddenSeries": false,
+        "id": 13,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": true,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(group) (kafka_consumergroup_group_lag >= 0)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "total lag",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Total Record Lag (Kafka Lag Exporter)",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      }
+    ],
+    "refresh": "10s",
+    "schemaVersion": 21,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": [
+        {
+          "allValue": null,
+          "current": {
+            "tags": [],
+            "text": "titan-ccp-aggregation",
+            "value": "titan-ccp-aggregation"
+          },
+          "datasource": "Prometheus",
+          "definition": "label_values(kafka_consumer_consumer_fetch_manager_metrics_records_lag, job)",
+          "hide": 0,
+          "includeAll": false,
+          "label": "Job",
+          "multi": false,
+          "name": "Job",
+          "options": [
+            {
+              "selected": true,
+              "text": "titan-ccp-aggregation",
+              "value": "titan-ccp-aggregation"
+            }
+          ],
+          "query": "label_values(kafka_consumer_consumer_fetch_manager_metrics_records_lag, job)",
+          "refresh": 0,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        }
+      ]
+    },
+    "time": {
+      "from": "now-1h",
+      "to": "now"
+    },
+    "timepicker": {
+      "refresh_intervals": [
+        "5s",
+        "10s",
+        "30s",
+        "1m",
+        "5m",
+        "15m",
+        "30m",
+        "1h",
+        "2h",
+        "1d"
+      ]
+    },
+    "timezone": "",
+    "title": "Scalability Benchmarking",
+    "uid": "dad0CNlZz",
+    "version": 25
+    }`}}
+{{- end }}
diff --git a/helm/templates/grafana/osp-dashboard-config-map.yaml b/helm/templates/grafana/osp-dashboard-config-map.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..22685adf8e93d95614bdbb71ef6bc993a009a150
--- /dev/null
+++ b/helm/templates/grafana/osp-dashboard-config-map.yaml
@@ -0,0 +1,1029 @@
+{{- if .Values.grafana.enabled -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "theodolite.fullname" . }}-grafana-scalability-osp
+  labels:
+    grafana_dashboard: "2"
+data:
+  osp-dashboard.json: |-
+    {{`{
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": "-- Grafana --",
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "gnetId": null,
+    "graphTooltip": 0,
+    "id": 2,
+    "iteration": 1631777972723,
+    "links": [],
+    "panels": [
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 7,
+          "w": 12,
+          "x": 0,
+          "y": 0
+        },
+        "hiddenSeries": false,
+        "id": 2,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum(cp_kafka_server_brokertopicmetrics_messagesinpersec_topic_ndwflow)",
+            "format": "time_series",
+            "interval": "",
+            "intervalFactor": 1,
+            "legendFormat": "ndwflow",
+            "refId": "A"
+          },
+          {
+            "expr": "sum(cp_kafka_server_brokertopicmetrics_messagesinpersec_topic_ndwspeed)",
+            "format": "time_series",
+            "interval": "",
+            "intervalFactor": 1,
+            "legendFormat": "ndwspeed",
+            "refId": "B"
+          },
+          {
+            "expr": "sum(cp_kafka_server_brokertopicmetrics_messagesinpersec_topic_ndwflow+cp_kafka_server_brokertopicmetrics_messagesinpersec_topic_ndwspeed)",
+            "format": "time_series",
+            "interval": "",
+            "intervalFactor": 1,
+            "legendFormat": "ndwflow+ndwspeed",
+            "refId": "C"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Messages In Per Second",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 7,
+          "w": 12,
+          "x": 12,
+          "y": 0
+        },
+        "hiddenSeries": false,
+        "id": 3,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum(cp_kafka_server_brokertopicmetrics_messagesinpersec_topic_metrics)",
+            "format": "time_series",
+            "interval": "",
+            "intervalFactor": 1,
+            "legendFormat": "Metrics",
+            "refId": "A"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Messages Out Per Second",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 0,
+          "y": 7
+        },
+        "hiddenSeries": false,
+        "id": 9,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": true,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(group, topic) (kafka_consumergroup_group_lag >= 0)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Record Lag",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 12,
+          "y": 7
+        },
+        "hiddenSeries": false,
+        "id": 5,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "count(count (kafka_consumer_consumer_fetch_manager_metrics_records_lag) by(pod))",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "instances",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Number of Instances",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "decimals": 0,
+            "format": "short",
+            "label": "",
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 0,
+          "y": 15
+        },
+        "hiddenSeries": false,
+        "id": 10,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(group,topic) (kafka_consumergroup_group_offset >= 0)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Records Consumed",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 12,
+          "y": 15
+        },
+        "hiddenSeries": false,
+        "id": 12,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "count by(group,topic) (kafka_consumergroup_group_offset >= 0)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Number of Partitions",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 0,
+          "y": 23
+        },
+        "hiddenSeries": false,
+        "id": 11,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(group,topic) (kafka_partition_latest_offset)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Records Produced (Kafka Lag Exporter)",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 12,
+          "y": 23
+        },
+        "hiddenSeries": false,
+        "id": 8,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "count by(job, topic) (kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Number of Partitions (Kafka Streams Export)",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "decimals": null,
+            "format": "short",
+            "label": "",
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 0,
+          "y": 31
+        },
+        "hiddenSeries": false,
+        "id": 4,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(job, topic) (kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Record Lag (Kafka Streams Export)",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 12,
+          "y": 31
+        },
+        "hiddenSeries": false,
+        "id": 13,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": true,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(group) (kafka_consumergroup_group_lag >= 0)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "total lag",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Total Record Lag (Kafka Lag Exporter)",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      }
+    ],
+    "refresh": "10s",
+    "schemaVersion": 22,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": [
+        {
+          "allValue": null,
+          "current": {
+            "tags": [],
+            "text": "titan-ccp-aggregation",
+            "value": "titan-ccp-aggregation"
+          },
+          "datasource": "Prometheus",
+          "definition": "label_values(kafka_consumer_consumer_fetch_manager_metrics_records_lag, job)",
+          "hide": 0,
+          "includeAll": false,
+          "index": -1,
+          "label": "Job",
+          "multi": false,
+          "name": "Job",
+          "options": [
+            {
+              "selected": true,
+              "text": "titan-ccp-aggregation",
+              "value": "titan-ccp-aggregation"
+            }
+          ],
+          "query": "label_values(kafka_consumer_consumer_fetch_manager_metrics_records_lag, job)",
+          "refresh": 0,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        }
+      ]
+    },
+    "time": {
+      "from": "now-1h",
+      "to": "now"
+    },
+    "timepicker": {
+      "refresh_intervals": [
+        "5s",
+        "10s",
+        "30s",
+        "1m",
+        "5m",
+        "15m",
+        "30m",
+        "1h",
+        "2h",
+        "1d"
+      ]
+    },
+    "timezone": "",
+    "title": "OSPBench",
+    "uid": "01O646v7z",
+    "variables": {
+      "list": []
+    },
+    "version": 1
+    }`}}
+{{- end }}
diff --git a/helm/templates/kafka/kafka-client.yaml b/helm/templates/kafka/kafka-client.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..02e16d33dfc9595dd16c41fa6bfe1404fd7889ab
--- /dev/null
+++ b/helm/templates/kafka/kafka-client.yaml
@@ -0,0 +1,19 @@
+{{- if .Values.kafkaClient.enabled -}}
+apiVersion: v1
+kind: Pod
+metadata:
+  name: {{ template "theodolite.fullname" . }}-kafka-client
+spec:
+  containers:
+  - name: kafka-client
+    image: confluentinc/cp-enterprise-kafka:5.4.0
+    command:
+      - sh
+      - -c
+      - "exec tail -f /dev/null"
+  {{- with .Values.kafkaClient.nodeSelector }}
+  nodeSelector:
+    {{- toYaml . | nindent 8 }}
+  {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm/templates/kafka/service-monitor.yaml b/helm/templates/kafka/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..68fd5f7599d36187fa7c4dee2fab211eb263c67d
--- /dev/null
+++ b/helm/templates/kafka/service-monitor.yaml
@@ -0,0 +1,16 @@
+{{- if .Values.kafkaClient.enabled -}}
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: cp-kafka
+    appScope: titan-ccp
+  name: {{ template "theodolite.fullname" . }}-cp-kafka
+spec:
+  selector:
+    matchLabels:
+      app: cp-kafka
+  endpoints:
+    - port: metrics
+      interval: 7s
+{{- end}}
diff --git a/helm/templates/prometheus/cluster-role-binding.yaml b/helm/templates/prometheus/cluster-role-binding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f2f167b94b79ad4db130565777cb8af486762c8c
--- /dev/null
+++ b/helm/templates/prometheus/cluster-role-binding.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.prometheus.clusterRoleBinding.enabled -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ template "theodolite.fullname" . }}-prometheus
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ template "theodolite.fullname" . }}-prometheus
+subjects:
+- kind: ServiceAccount
+  name: {{ template "theodolite.fullname" . }}-prometheus
+  namespace: {{ .Release.Namespace }}
+{{- end}}
\ No newline at end of file
diff --git a/helm/templates/prometheus/cluster-role.yaml b/helm/templates/prometheus/cluster-role.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c2fea2205451e01474d1ab7ef1ca342a9d975dc9
--- /dev/null
+++ b/helm/templates/prometheus/cluster-role.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.prometheus.clusterRole.enabled -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: {{ template "theodolite.fullname" . }}-prometheus
+rules:
+- apiGroups: [""]
+  resources:
+  - nodes
+  - services
+  - endpoints
+  - pods
+  verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+  resources:
+  - configmaps
+  verbs: ["get"]
+- nonResourceURLs: ["/metrics"]
+  verbs: ["get"]
+{{- end }}
\ No newline at end of file
diff --git a/helm/templates/prometheus/datasource-config-map.yaml b/helm/templates/prometheus/datasource-config-map.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b28157940c4dd7cb05eca3fe04926f6e7726830f
--- /dev/null
+++ b/helm/templates/prometheus/datasource-config-map.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.grafana.enabled -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "theodolite.fullname" . }}-prometheus
+  labels:
+    grafana_datasource: "1"
+data:
+  datasource.yaml: |-
+    # config file version
+    apiVersion: 1
+    datasources:
+      # <string, required> name of the datasource. Required
+    - name: Prometheus
+      # <string, required> datasource type. Required
+      type: prometheus
+      # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
+      access: proxy
+      # <bool> mark as default datasource. Max one per org
+      isDefault: true
+      # <int> org id. will default to orgId 1 if not specified
+      orgId: 1
+      # <string> url
+      url: http://prometheus-operated:9090 #http://localhost:9090
+      # <map> fields that will be converted to json and stored in json_data
+      jsonData:
+        timeInterval: "15s"
+      version: 1
+      # <bool> allow users to edit datasources from the UI.
+      editable: true
+{{- end }}
\ No newline at end of file
diff --git a/helm/templates/prometheus/prometheus.yaml b/helm/templates/prometheus/prometheus.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e297b20290be9686b901fa8c76823136c6fabef
--- /dev/null
+++ b/helm/templates/prometheus/prometheus.yaml
@@ -0,0 +1,21 @@
+{{- if .Values.prometheus.enabled -}}
+apiVersion: monitoring.coreos.com/v1
+kind: Prometheus
+metadata:
+  name: {{ template "theodolite.fullname" . }}-prometheus
+spec:
+  serviceAccountName: {{ template "theodolite.fullname" . }}-prometheus
+  serviceMonitorSelector:
+    matchLabels:
+      #app: cp-kafka
+      appScope: titan-ccp
+  resources:
+    requests:
+      memory: 400Mi
+  #scrapeInterval: 1s
+  enableAdminAPI: true
+  {{- with .Values.prometheus.nodeSelector }}
+  nodeSelector:
+    {{- toYaml . | nindent 8 }}
+  {{- end}}
+{{- end}}
\ No newline at end of file
diff --git a/helm/templates/prometheus/service-account.yaml b/helm/templates/prometheus/service-account.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..090284a0cf3c6bb7ca643ee111b2d62d1bd93fb3
--- /dev/null
+++ b/helm/templates/prometheus/service-account.yaml
@@ -0,0 +1,6 @@
+{{- if .Values.prometheus.serviceAccount.enabled -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "theodolite.fullname" . }}-prometheus
+{{- end}}
\ No newline at end of file
diff --git a/helm/templates/tests/test-connection.yaml b/helm/templates/tests/test-connection.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7af87e98920c11bcfaccb27724e6f29fc76771a0
--- /dev/null
+++ b/helm/templates/tests/test-connection.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "theodolite.fullname" . }}-test-prometheus"
+  labels:
+    {{- include "theodolite.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test-success
+spec:
+  containers:
+    - name: wget
+      image: busybox
+      command: ['wget']
+      args: ['http://prometheus-operated:9090']
+  restartPolicy: Never
diff --git a/helm/templates/theodolite/benchmarks/benchmark-resources-config-map.yaml b/helm/templates/theodolite/benchmarks/benchmark-resources-config-map.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2dd355141350921d772edb73a1e8e7795600b0d1
--- /dev/null
+++ b/helm/templates/theodolite/benchmarks/benchmark-resources-config-map.yaml
@@ -0,0 +1,12 @@
+{{- range $configmap, $enabled := .Values.operator.theodoliteBenchmarks.resourceConfigMaps }}
+{{- if $enabled -}}
+{{- $name := kebabcase $configmap }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: benchmark-resources-{{ $name }}
+data:
+{{ ($.Files.Glob (printf "benchmark-definitions/%s/resources/*" $name)).AsConfig | indent 2 }}
+---
+{{- end }}
+{{- end }}
diff --git a/helm/templates/theodolite/benchmarks/benchmark.yaml b/helm/templates/theodolite/benchmarks/benchmark.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1c4cb02ea69dbed711b781535127e00e2a24f1d7
--- /dev/null
+++ b/helm/templates/theodolite/benchmarks/benchmark.yaml
@@ -0,0 +1,7 @@
+{{- range $benchmark, $enabled := .Values.operator.theodoliteBenchmarks.benchmarks }}
+{{- if $enabled -}}
+{{- $name := kebabcase $benchmark }}
+{{ $.Files.Get (printf "benchmark-definitions/%s/%s-benchmark-operator.yaml" $name $name) }}
+---
+{{- end }}
+{{- end }}
diff --git a/helm/templates/theodolite/random-scheduler/cluster-role-binding.yaml b/helm/templates/theodolite/random-scheduler/cluster-role-binding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..605159e1b941730d9baddac679c3c1b91e8a88b6
--- /dev/null
+++ b/helm/templates/theodolite/random-scheduler/cluster-role-binding.yaml
@@ -0,0 +1,14 @@
+{{- if and .Values.randomScheduler.enabled .Values.randomScheduler.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ include "theodolite.fullname" . }}-random-scheduler
+subjects:
+- kind: ServiceAccount
+  name: {{ include "theodolite.fullname" . }}-random-scheduler
+  namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  apiGroup: rbac.authorization.k8s.io
+  name: system:kube-scheduler
+{{- end }}
diff --git a/helm/templates/theodolite/random-scheduler/deployment.yaml b/helm/templates/theodolite/random-scheduler/deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..132c5dcf86b9bf6213ac0db6cc03fb1dbadbc2cd
--- /dev/null
+++ b/helm/templates/theodolite/random-scheduler/deployment.yaml
@@ -0,0 +1,34 @@
+{{- if .Values.randomScheduler.enabled -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "theodolite.fullname" . }}-random-scheduler
+  labels:
+    app: {{ include "theodolite.fullname" . }}
+    component: random-scheduler
+  namespace: kube-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: {{ include "theodolite.fullname" . }}
+      component: random-scheduler
+  template:
+    metadata:
+      labels:
+        app: {{ include "theodolite.fullname" . }}
+        component: random-scheduler
+    spec:
+      serviceAccount: {{ include "theodolite.fullname" . }}-random-scheduler
+      containers:
+        - name: random-scheduler
+          image: "{{ .Values.randomScheduler.image }}:{{ .Values.randomScheduler.imageTag }}"
+          imagePullPolicy: "{{ .Values.randomScheduler.imagePullPolicy }}"
+          env:
+            - name: TARGET_NAMESPACE
+              value: {{ .Release.Namespace }}
+      {{- with .Values.randomScheduler.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+{{- end }}
diff --git a/helm/templates/theodolite/random-scheduler/service-account.yaml b/helm/templates/theodolite/random-scheduler/service-account.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..593d9589de53b0c3ad9f826ea560c77acaf54a25
--- /dev/null
+++ b/helm/templates/theodolite/random-scheduler/service-account.yaml
@@ -0,0 +1,10 @@
+{{- if and .Values.randomScheduler.enabled .Values.randomScheduler.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: kube-system
+  name: {{ include "theodolite.fullname" . }}-random-scheduler 
+  labels:
+    app: {{ include "theodolite.fullname" . }}
+    component: random-scheduler
+{{- end }}
diff --git a/helm/templates/theodolite/results-volume/pvc.yaml b/helm/templates/theodolite/results-volume/pvc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..26ac56e42889ccaebbc669791ad4d318b8318fec
--- /dev/null
+++ b/helm/templates/theodolite/results-volume/pvc.yaml
@@ -0,0 +1,18 @@
+{{- if and .Values.operator.resultsVolume.persistent.enabled (not .Values.operator.resultsVolume.persistent.existingClaim) -}}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: {{ include "theodolite.resultsClaimName" . }}
+spec:
+  {{- if .Values.operator.resultsVolume.persistent.storageClassName }}
+  storageClassName: {{ .Values.operator.resultsVolume.persistent.storageClassName }}
+  {{- end }}
+  accessModes:
+    {{- range .Values.operator.resultsVolume.persistent.accessModes }}
+    - {{ . | quote }}
+    {{- end }}
+  resources:
+    requests:
+      storage: {{ .Values.operator.resultsVolume.persistent.size | quote }}
+{{- end }}
\ No newline at end of file
diff --git a/helm/templates/theodolite/role-binding.yaml b/helm/templates/theodolite/role-binding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3b327bb246f9716be0939416db55fc1b2cc5dd70
--- /dev/null
+++ b/helm/templates/theodolite/role-binding.yaml
@@ -0,0 +1,15 @@
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ include "theodolite.fullname" . }}
+  labels:
+    app: {{ include "theodolite.name" . }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ include "theodolite.fullname" . }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "theodolite.serviceAccountName" . }}
+{{- end }}
\ No newline at end of file
diff --git a/helm/templates/theodolite/role.yaml b/helm/templates/theodolite/role.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..43ee0e43d6974cd95548df32d6c4b1df8f3e497e
--- /dev/null
+++ b/helm/templates/theodolite/role.yaml
@@ -0,0 +1,79 @@
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ include "theodolite.fullname" . }}
+rules:
+  - apiGroups:
+    - apps
+    resources:
+    - deployments
+    - statefulsets
+    verbs:
+    - delete
+    - list
+    - get
+    - create
+    - update
+  - apiGroups:
+    - ""
+    resources:
+    - services
+    - pods
+    - configmaps
+    verbs:
+    - update
+    - delete
+    - list
+    - get
+    - create
+  - apiGroups:
+    - ""
+    resources:
+    - pods/exec
+    verbs:
+    - create
+    - get
+  - apiGroups:
+    - monitoring.coreos.com
+    resources:
+    - servicemonitors
+    verbs:
+    - update
+    - delete
+    - list
+    - create
+    - get
+  {{- if .Values.operator.enabled }}
+  - apiGroups:
+    - theodolite.com
+    resources:
+    - benchmarks
+    - benchmarks/status
+    - executions
+    - executions/status
+    verbs:
+    - delete
+    - list
+    - get
+    - create
+    - watch
+    - update
+    - patch
+  - apiGroups:
+    - coordination.k8s.io
+    resources:
+    - leases
+    verbs:
+    - delete
+    - get
+    - create
+    - update
+  - apiGroups:
+    - ""
+    resources:
+    - events
+    verbs:
+    - create
+  {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm/templates/theodolite/serviceaccount.yaml b/helm/templates/theodolite/serviceaccount.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4585b8ce413bf3d36cb986163788c353f2a4a2de
--- /dev/null
+++ b/helm/templates/theodolite/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "theodolite.serviceAccountName" . }}
+  labels:
+    {{- include "theodolite.labels" . | nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/helm/templates/theodolite/theodolite-operator.yaml b/helm/templates/theodolite/theodolite-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c7ced880cbbfbb9795ef59156ea1df7d5b860ec6
--- /dev/null
+++ b/helm/templates/theodolite/theodolite-operator.yaml
@@ -0,0 +1,83 @@
+{{- if .Values.operator.enabled -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "theodolite.fullname" . }}-operator
+spec:
+  selector:
+    matchLabels:
+      app: {{ include "theodolite.fullname" . }}
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: {{ include "theodolite.fullname" . }}
+    spec:
+      terminationGracePeriodSeconds: 0
+      serviceAccountName: {{ include "theodolite.serviceAccountName" . }}
+      securityContext:
+        runAsUser: 0 # Set the permissions for write access to the volumes.
+      containers:
+        - name: theodolite
+          image: "{{ .Values.operator.image }}:{{ .Values.operator.imageTag }}"
+          imagePullPolicy: "{{ .Values.operator.imagePullPolicy }}"
+          env:
+            - name: NAMESPACE
+              value: {{ .Release.Namespace }}
+            - name: MODE
+              value: operator
+            - name: RESULTS_FOLDER
+              value: "./results"
+          volumeMounts:
+            - name: theodolite-results-volume
+              mountPath: "/deployments/results"
+        {{- if .Values.operator.sloChecker.lagTrend.enabled }}
+        - name: lag-trend-slo-checker
+          image: "{{ .Values.operator.sloChecker.lagTrend.image }}:{{ .Values.operator.sloChecker.lagTrend.imageTag }}"
+          imagePullPolicy: "{{ .Values.operator.sloChecker.lagTrend.imagePullPolicy }}"
+          ports:
+          - containerPort: 80
+            name: analysis
+          env:
+          - name: LOG_LEVEL
+            value: INFO
+        {{- end }}
+        {{- if .Values.operator.sloChecker.droppedRecordsKStreams.enabled }}
+        - name: slo-checker-dropped-records-kstreams
+          image: "{{ .Values.operator.sloChecker.droppedRecordsKStreams.image }}:{{ .Values.operator.sloChecker.droppedRecordsKStreams.imageTag }}"
+          imagePullPolicy: "{{ .Values.operator.sloChecker.droppedRecordsKStreams.imagePullPolicy }}"
+          ports:
+          - containerPort: 8081
+            name: analysis
+          env:
+          - name: PORT
+            value: "8081"
+          - name: LOG_LEVEL
+            value: INFO
+        {{- end }}
+        {{- if .Values.operator.resultsVolume.accessSidecar.enabled }}
+        - name: results-access
+          image: "{{ .Values.operator.resultsVolume.accessSidecar.image }}:{{ .Values.operator.resultsVolume.accessSidecar.imageTag }}"
+          imagePullPolicy: "{{ .Values.operator.resultsVolume.accessSidecar.imagePullPolicy }}"
+          command:
+          - sh
+          - -c
+          - exec tail -f /dev/null
+          volumeMounts:
+          - mountPath: /results
+            name: theodolite-results-volume
+        {{- end }}
+      volumes:
+      - name: theodolite-results-volume
+        {{- if .Values.operator.resultsVolume.persistent.enabled }}
+        persistentVolumeClaim:
+          claimName: {{ include "theodolite.resultsClaimName" . | quote }}
+        {{- else }}
+        emptyDir: {}
+        {{- end }}
+      {{- with .Values.operator.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+{{- end }}
diff --git a/helm/update-index.sh b/helm/update-index.sh
new file mode 100755
index 0000000000000000000000000000000000000000..66c55bb8b79e18e3d06d156cb1859f2a53078999
--- /dev/null
+++ b/helm/update-index.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env sh
+
+RELEASE_NAME=$1 # Supposed to be equal to the tag, e.g., v0.3.0
+
+RELEASE_PATH="https://github.com/cau-se/theodolite/releases/download"
+REPO_INDEX="../docs/index.yaml"
+
+helm repo index . --url "$RELEASE_PATH/$RELEASE_NAME" --merge "$REPO_INDEX" && \
+  mv index.yaml "$REPO_INDEX"
\ No newline at end of file
diff --git a/helm/values.yaml b/helm/values.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9de0155cf26d26bc20975d8ec6524607ac9a1d43
--- /dev/null
+++ b/helm/values.yaml
@@ -0,0 +1,323 @@
+###
+# Theodolite resources
+###
+
+kafkaClient:
+  enabled: false
+  nodeSelector: {}
+  
+
+###
+# Configuration of sub charts
+###
+
+###
+# Grafana
+###
+grafana:
+  enabled: true
+  nodeSelector: {}
+  image:
+    repository: grafana/grafana
+    tag: 6.7.3
+    pullPolicy: IfNotPresent
+  # Administrator credentials when not using an existing secret (see below)
+  adminUser: admin
+  adminPassword: admin
+  grafana.ini:
+    #org_name: Theodolite
+    auth.anonymous:
+      # enable anonymous access
+      enabled: true
+      org_role: Admin # Role for unauthenticated users, other valid values are `Viewer`, `Editor` and `Admin`
+    users:
+      default_theme: light
+    #dashboards: # the following doesn't work yet but is planned
+      # Path to the default home dashboard. If this value is empty, then Grafana uses StaticRootPath + "dashboards/home.json"
+      #default_home_dashboard_path: "/tmp/dashboards/k8s-dashboard.json"
+  ## Sidecars that collect the ConfigMaps with the specified label and store the included files in the respective folders
+  ## Requires at least Grafana 5 to work and can't be used together with the parameters dashboardProviders, datasources and dashboards
+  sidecar:
+    image:
+      repository: "kiwigrid/k8s-sidecar"
+      tag: "0.1.99"
+    imagePullPolicy: IfNotPresent
+    dashboards:
+      enabled: true
+      provider:
+        # allow updating provisioned dashboards from the UI
+        allowUiUpdates: true
+    datasources:
+      enabled: true
+  service:
+    nodePort: 31199
+    type: NodePort
+
+
+###
+# Confluent Platform 
+###
+
+cp-helm-charts:
+  enabled: true
+  ## ------------------------------------------------------
+  ## Zookeeper
+  ## ------------------------------------------------------
+  cp-zookeeper:
+    enabled: true
+    nodeSelector: {}
+    servers: 3 # default: 3 
+    image: confluentinc/cp-zookeeper
+    imageTag: 5.4.0
+    ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+    ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+    imagePullSecrets:
+    #  - name: "regcred"
+    heapOptions: "-Xms512M -Xmx512M"
+    persistence:
+      enabled: false
+
+  ## ------------------------------------------------------
+  ## Kafka
+  ## ------------------------------------------------------
+  cp-kafka:
+    enabled: true
+    nodeSelector: {}
+    brokers: 10 # default: 10
+    image: confluentinc/cp-enterprise-kafka
+    imageTag: 5.4.0
+    ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+    ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+    imagePullSecrets:
+    #  - name: "regcred"
+    heapOptions: "-Xms512M -Xmx512M"
+    persistence:
+      enabled: false
+    resources: {}
+    configurationOverrides:
+      #offsets.topic.replication.factor: 1
+      "message.max.bytes": "134217728" # 128 MB
+      "replica.fetch.max.bytes": "134217728" # 128 MB
+      #default.replication.factor: 1
+      # "min.insync.replicas": 2
+      "auto.create.topics.enable": false
+      #"log.retention.ms": "10000" # 10s
+      "log.retention.ms": "7200000" # 2h
+      "metrics.sample.window.ms": "5000" #5s
+      "advertised.listeners": |-
+        EXTERNAL://${HOST_IP}:$((31090 + ${KAFKA_BROKER_ID}))
+      "listener.security.protocol.map": |-
+        PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT
+    
+    nodeport:
+      enabled: false
+      servicePort: 19092
+      firstListenerPort: 31090
+
+
+  ## ------------------------------------------------------
+  ## Schema Registry
+  ## ------------------------------------------------------
+  cp-schema-registry:
+    enabled: true
+    nodeSelector: {}
+    image: confluentinc/cp-schema-registry
+    imageTag: 5.4.0
+    ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+    ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+    imagePullSecrets:
+    #  - name: "regcred"
+    heapOptions: "-Xms512M -Xmx512M"
+    resources: {}
+    
+    external:
+      enabled: true
+      type: NodePort
+      servicePort: 8081
+      nodePort: 30099
+      annotations: {}
+
+  cp-kafka-rest:
+    enabled: false
+
+  cp-kafka-connect:
+    enabled: false
+
+  cp-ksql-server:
+    enabled: false
+
+  cp-control-center:
+    enabled: false
+
+
+###
+# Kafka Lag Exporter
+###
+kafka-lag-exporter:
+  enabled: true
+  image:
+    pullPolicy: IfNotPresent
+  nodeSelector: {}
+  
+  clusters:
+    - name: "theodolite-cp-kafka"
+      bootstrapBrokers: "theodolite-cp-kafka:9092"
+
+  ## The interval between refreshing metrics
+  pollIntervalSeconds: 15
+
+  prometheus:
+    serviceMonitor:
+      enabled: true
+      interval: "5s"
+      additionalLabels:
+        appScope: titan-ccp
+
+
+###
+# Prometheus Monitoring Stack (Prometheus Operator)
+###
+kube-prometheus-stack:
+  commonLabels:
+    appScope: titan-ccp
+  
+  alertmanager:
+    enabled: false
+  
+  grafana:
+    enabled: false
+  
+  kubeApiServer:
+    enabled: false
+  
+  kubelet:
+    enabled: false
+  
+  kubeControllerManager:
+    enabled: false
+  
+  coreDns:
+    enabled: false
+  
+  kubeDns:
+    enabled: false
+   
+  kubeEtcd:
+    enabled: false
+  
+  kubeScheduler:
+    enabled: false
+  
+  kubeProxy:
+    enabled: false
+  
+  kubeStateMetrics:
+    enabled: false
+  
+  nodeExporter:
+    enabled: false
+  
+  prometheusOperator:
+    enabled: true
+    namespaces:
+      releaseNamespace: true
+      additional: []
+    nodeSelector: {}
+  
+  prometheus:
+    enabled: false
+
+
+###
+# Prometheus
+###
+prometheus: 
+  enabled: true
+  nodeSelector: {}
+  
+  # Depending on your cluster's security and permission settings, you may need to create the following resources
+  serviceAccount:
+    enabled: true
+  clusterRole:
+    enabled: true
+  clusterRoleBinding:
+    enabled: true
+
+###
+# Theodolite Operator
+###
+operator:
+  enabled: true
+  
+  image: ghcr.io/cau-se/theodolite
+  imageTag: latest
+  imagePullPolicy: Always
+
+  nodeSelector: {}
+
+  sloChecker:
+    lagTrend:
+      enabled: true
+      image: ghcr.io/cau-se/theodolite-slo-checker-lag-trend
+      imageTag: latest
+      imagePullPolicy: Always
+    droppedRecordsKStreams:
+      enabled: true
+      image: ghcr.io/cau-se/theodolite-slo-checker-dropped-records-kstreams
+      imageTag: latest
+      imagePullPolicy: Always
+
+  resultsVolume:
+    persistent:
+      enabled: false
+      # existingClaim:
+      # storageClassName:
+      accessModes:
+        - ReadWriteOnce
+      size: 1Gi
+    accessSidecar:
+      enabled: true
+      image: busybox
+      imageTag: stable
+      imagePullPolicy: IfNotPresent
+
+  theodoliteBenchmarks:
+    resourceConfigMaps:
+      uc1LoadGenerator: true
+      uc1Kstreams: true
+      uc1Flink: true
+      uc2LoadGenerator: true
+      uc2Kstreams: true
+      uc2Flink: true
+      uc3LoadGenerator: true
+      uc3Kstreams: true
+      uc3Flink: true
+      uc4LoadGenerator: true
+      uc4Kstreams: true
+      uc4Flink: true
+    benchmarks:
+      uc1Kstreams: true
+      uc1Flink: true
+      uc2Kstreams: true
+      uc2Flink: true
+      uc3Kstreams: true
+      uc3Flink: true
+      uc4Kstreams: true
+      uc4Flink: true
+
+serviceAccount:
+  create: true
+
+rbac:
+  create: true
+
+randomScheduler:
+  enabled: true
+  image: ghcr.io/cau-se/theodolite-random-scheduler
+  imageTag: latest
+  imagePullPolicy: Always
+  rbac:
+    create: true
+  serviceAccount:
+    create: true
+  nodeSelector: {}
diff --git a/slo-checker/dropped-records/Dockerfile b/slo-checker/dropped-records/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..032b8153a6989ca04631ba553289dacb3620a38d
--- /dev/null
+++ b/slo-checker/dropped-records/Dockerfile
@@ -0,0 +1,6 @@
+FROM tiangolo/uvicorn-gunicorn-fastapi:python3.7
+
+COPY requirements.txt requirements.txt
+RUN pip install -r requirements.txt
+
+COPY ./app /app
\ No newline at end of file
diff --git a/slo-checker/dropped-records/README.md b/slo-checker/dropped-records/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3a1ea982a399201143ad50f173c934ff58abbf4a
--- /dev/null
+++ b/slo-checker/dropped-records/README.md
@@ -0,0 +1,80 @@
+# Kafka Streams Dropped Record SLO Evaluator
+
+## Execution
+
+For development:
+
+```sh
+uvicorn main:app --reload --port 81 # run this command inside the app/ folder
+```
+
+## Build the Docker image
+
+```sh
+docker build . -t theodolite-evaluator
+```
+
+Run the Docker image:
+
+```sh
+docker run -p 80:81 theodolite-evaluator
+```
+
+## Configuration
+
+You can set the `HOST` and the `PORT` (and many more parameters) via environment variables. The default is `0.0.0.0:80`.
+For more information see the [Gunicorn/FastAPI Docker docs](https://github.com/tiangolo/uvicorn-gunicorn-fastapi-docker#advanced-usage).
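+
+As a usage sketch (the host port mapping and the `8081` value are arbitrary choices for illustration), the port can be overridden when starting the container:
+
+```sh
+docker run -p 80:8081 -e PORT=8081 theodolite-evaluator
+```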
+
+## API Documentation
+
+The running webserver provides a REST API with the following route:
+
+* /dropped-records
+  * Method: POST
+  * Body:
+    * results
+      * metric-metadata
+      * values
+    * metadata
+      * threshold
+      * warmup
+
+The body of the request must be a JSON string that satisfies the following conditions:
+
+* **dropped records**: This property is based on the [Range Vector type](https://www.prometheus.io/docs/prometheus/latest/querying/api/#range-vectors) from Prometheus and must have the following JSON *structure*:
+
+    ```json
+    {
+        "results": [
+            [
+                {
+                    "metric": {
+                        "<label-name>": "<label-value>"
+                    },
+                    "values": [
+                        [
+                            <unix_timestamp>, // 1.634624989695E9
+                            "<sample_value>" // integer
+                        ]
+                    ]
+                }
+            ]
+        ],
+        "metadata": {
+            "threshold": 2000000,
+            "warmup": 60
+        }
+    }
+    ```
+
+### Description
+
+* results:
+  * metric-metadata:
+    * Labels of this metric. The `dropped-records` SLO checker does not use labels in the calculation of the service level objective.
+  * values:
+    * The `<unix_timestamp>` provided as the first element of each entry in the "values" array must be the timestamp of the measurement value in seconds (with optional decimal precision).
+    * The `<sample_value>` must be the measurement value as a string.
+* metadata: Metadata required for the calculation of the service level objective.
+  * **threshold**: Must be an unsigned integer that specifies the threshold for the SLO evaluation. The SLO is considered fulfilled if the result value is below the threshold; if the result value is equal to or above the threshold, the SLO is considered not fulfilled.
+  * **warmup**: Specifies the warmup time in seconds that is ignored when evaluating the SLO. An example request is shown below.
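+
+As a usage sketch (assuming the checker is reachable on `localhost:80`; the provided test fixture `resources/test-1-rep-success.json` serves as the request body here), the endpoint can be queried with `curl` and returns `true` or `false`:
+
+```sh
+curl -X POST http://localhost:80/dropped-records \
+  -H "Content-Type: application/json" \
+  -d @resources/test-1-rep-success.json
+```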
diff --git a/slo-checker/dropped-records/app/main.py b/slo-checker/dropped-records/app/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1577f9c11ed5a2798ee0b4505ae0739301ab2a8
--- /dev/null
+++ b/slo-checker/dropped-records/app/main.py
@@ -0,0 +1,32 @@
+from fastapi import FastAPI, Request
+import logging
+import os
+import json
+import sys
+
+app = FastAPI()
+
+logging.basicConfig(stream=sys.stdout,
+                    format="%(asctime)s %(levelname)s %(name)s: %(message)s")
+logger = logging.getLogger("API")
+
+
+if os.getenv('LOG_LEVEL') == 'INFO':
+    logger.setLevel(logging.INFO)
+elif os.getenv('LOG_LEVEL') == 'WARNING':
+    logger.setLevel(logging.WARNING)
+elif os.getenv('LOG_LEVEL') == 'DEBUG':
+    logger.setLevel(logging.DEBUG)
+
+
+def check_service_level_objective(results, threshold):
+    # The SLO is fulfilled if the maximum number of dropped records stays below the threshold.
+    return max(results) < threshold
+
+@app.post("/dropped-records", response_model=bool)
+async def evaluate_slope(request: Request):
+    data = json.loads(await request.body())
+    # End of the warmup period: timestamp of the first sample plus the warmup offset in seconds.
+    warmup = int(data['results'][0][0]['values'][0][0]) + int(data['metadata']['warmup'])
+    # Keep all samples recorded after the warmup period; earlier samples are mapped to 0.
+    results = [int(val[1]) if int(val[0]) >= warmup else 0
+               for result in data['results'] for r in result for val in r['values']]
+    return check_service_level_objective(results=results, threshold=data['metadata']['threshold'])
+
+logger.info("SLO evaluator is online")
\ No newline at end of file
diff --git a/slo-checker/dropped-records/app/test.py b/slo-checker/dropped-records/app/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c657c914002066357d58d88d7f8e4afe920db45
--- /dev/null
+++ b/slo-checker/dropped-records/app/test.py
@@ -0,0 +1,23 @@
+import unittest
+from main import app, check_service_level_objective
+import json
+from fastapi.testclient import TestClient
+
+class TestSloEvaluation(unittest.TestCase):
+    client = TestClient(app)
+
+    def test_1_rep(self):
+        with open('../resources/test-1-rep-success.json') as json_file:
+            data = json.load(json_file)
+            response = self.client.post("/dropped-records", json=data)
+            self.assertEqual(response.json(), True)
+
+    def test_check_service_level_objective(self):
+        values = [x for x in range(-100, 100)]
+
+        self.assertEqual(check_service_level_objective(values, 90), False)
+        self.assertEqual(check_service_level_objective(values, 110), True)
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/slo-checker/dropped-records/requirements.txt b/slo-checker/dropped-records/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b6c3863226c2bd5e8bcd7982b2674dee593f192
--- /dev/null
+++ b/slo-checker/dropped-records/requirements.txt
@@ -0,0 +1,5 @@
+fastapi==0.65.2
+scikit-learn==0.20.3
+pandas==1.0.3
+uvicorn
+requests
diff --git a/slo-checker/dropped-records/resources/test-1-rep-success.json b/slo-checker/dropped-records/resources/test-1-rep-success.json
new file mode 100644
index 0000000000000000000000000000000000000000..0964c30fed60e34c1ac4cf6b6b89f81d95a2f0eb
--- /dev/null
+++ b/slo-checker/dropped-records/resources/test-1-rep-success.json
@@ -0,0 +1,273 @@
+{
+    "results": [
+        [
+            {
+                "metric": {
+                    "job": "titan-ccp-aggregation"
+                },
+                "values": [
+                    [
+                        1.634624674695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624679695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624684695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624689695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624694695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624699695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624704695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624709695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624714695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624719695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624724695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624729695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624734695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624739695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624744695E9,
+                        "1"
+                    ],
+                    [
+                        1.634624749695E9,
+                        "3"
+                    ],
+                    [
+                        1.634624754695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624759695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624764695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624769695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624774695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624779695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624784695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624789695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624794695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624799695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624804695E9,
+                        "176"
+                    ],
+                    [
+                        1.634624809695E9,
+                        "176"
+                    ],
+                    [
+                        1.634624814695E9,
+                        "176"
+                    ],
+                    [
+                        1.634624819695E9,
+                        "176"
+                    ],
+                    [
+                        1.634624824695E9,
+                        "176"
+                    ],
+                    [
+                        1.634624829695E9,
+                        "159524"
+                    ],
+                    [
+                        1.634624834695E9,
+                        "209870"
+                    ],
+                    [
+                        1.634624839695E9,
+                        "278597"
+                    ],
+                    [
+                        1.634624844695E9,
+                        "460761"
+                    ],
+                    [
+                        1.634624849695E9,
+                        "460761"
+                    ],
+                    [
+                        1.634624854695E9,
+                        "460761"
+                    ],
+                    [
+                        1.634624859695E9,
+                        "460761"
+                    ],
+                    [
+                        1.634624864695E9,
+                        "460761"
+                    ],
+                    [
+                        1.634624869695E9,
+                        "606893"
+                    ],
+                    [
+                        1.634624874695E9,
+                        "653534"
+                    ],
+                    [
+                        1.634624879695E9,
+                        "755796"
+                    ],
+                    [
+                        1.634624884695E9,
+                        "919317"
+                    ],
+                    [
+                        1.634624889695E9,
+                        "919317"
+                    ],
+                    [
+                        1.634624894695E9,
+                        "955926"
+                    ],
+                    [
+                        1.634624899695E9,
+                        "955926"
+                    ],
+                    [
+                        1.634624904695E9,
+                        "955926"
+                    ],
+                    [
+                        1.634624909695E9,
+                        "955926"
+                    ],
+                    [
+                        1.634624914695E9,
+                        "955926"
+                    ],
+                    [
+                        1.634624919695E9,
+                        "1036530"
+                    ],
+                    [
+                        1.634624924695E9,
+                        "1078477"
+                    ],
+                    [
+                        1.634624929695E9,
+                        "1194775"
+                    ],
+                    [
+                        1.634624934695E9,
+                        "1347755"
+                    ],
+                    [
+                        1.634624939695E9,
+                        "1352151"
+                    ],
+                    [
+                        1.634624944695E9,
+                        "1360428"
+                    ],
+                    [
+                        1.634624949695E9,
+                        "1360428"
+                    ],
+                    [
+                        1.634624954695E9,
+                        "1360428"
+                    ],
+                    [
+                        1.634624959695E9,
+                        "1360428"
+                    ],
+                    [
+                        1.634624964695E9,
+                        "1360428"
+                    ],
+                    [
+                        1.634624969695E9,
+                        "1525685"
+                    ],
+                    [
+                        1.634624974695E9,
+                        "1689296"
+                    ],
+                    [
+                        1.634624979695E9,
+                        "1771358"
+                    ],
+                    [
+                        1.634624984695E9,
+                        "1854284"
+                    ],
+                    [
+                        1.634624989695E9,
+                        "1854284"
+                    ]
+                ]
+            }
+        ]
+    ],
+    "metadata": {
+        "threshold": 2000000,
+        "warmup": 60
+    }
+}
\ No newline at end of file
diff --git a/slo-checker/record-lag/Dockerfile b/slo-checker/record-lag/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..032b8153a6989ca04631ba553289dacb3620a38d
--- /dev/null
+++ b/slo-checker/record-lag/Dockerfile
@@ -0,0 +1,6 @@
+FROM tiangolo/uvicorn-gunicorn-fastapi:python3.7
+
+COPY requirements.txt requirements.txt
+RUN pip install -r requirements.txt
+
+COPY ./app /app
\ No newline at end of file
diff --git a/slo-checker/record-lag/README.md b/slo-checker/record-lag/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b4882eeaf54aadfb8cbf33a957e6052a7b74123b
--- /dev/null
+++ b/slo-checker/record-lag/README.md
@@ -0,0 +1,80 @@
+# Lag Trend SLO Evaluator
+
+## Execution
+
+For development:
+
+```sh
+uvicorn main:app --reload # run this command inside the app/ folder
+```
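+
+The unit tests can be run from inside the `app/` folder as well (a minimal sketch; depending on your environment, `python` may need to be `python3`):
+
+```sh
+python test.py
+```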
+
+Build the Docker image:
+
+```sh
+docker build . -t theodolite-evaluator
+```
+
+Run the Docker image:
+
+```sh
+docker run -p 80:80 theodolite-evaluator
+```
+
+## Configuration
+
+You can set the `HOST` and the `PORT` (and many more parameters) via environment variables. The default is `0.0.0.0:80`.
+For more information see the [Gunicorn/FastAPI Docker docs](https://github.com/tiangolo/uvicorn-gunicorn-fastapi-docker#advanced-usage).
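+
+For example, to expose the evaluator on a different port (a minimal sketch; the `PORT` variable is interpreted by the base image as described in the linked docs):
+
+```sh
+docker run -e PORT=8080 -p 8080:8080 theodolite-evaluator
+```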
+
+# API Documentation
+
+The running webserver provides a REST API with the following route:
+
+* /evaluate-slope
+  * Method: POST
+  * Body:
+    * results
+      * metric-metadata
+      * values
+    * metadata
+      * threshold
+      * warmup
+
+The body of the request must be a JSON string that satisfies the following conditions:
+
+* **results**: This property is based on the [Range Vector type](https://www.prometheus.io/docs/prometheus/latest/querying/api/#range-vectors) from Prometheus and must have the following JSON *structure*:
+
+    ```json
+    {
+        "results": [
+            [
+                {
+                    "metric": {
+                        "<label-name>": "<label-value>"
+                    },
+                    "values": [
+                        [
+                            <unix_timestamp>, // 1.634624989695E9
+                            "<sample_value>" // integer
+                        ]
+                    ]
+                }
+            ]
+        ],
+        "metadata": {
+            "threshold": 2000000,
+            "warmup": 60
+        }
+    }
+    ```
+
+### Description
+
+* results:
+  * metric-metadata:
+    * Labels of this metric. The `record-lag` SLO checker expects a `group` label for each metric.
+  * values:
+    * The `<unix_timestamp>` provided as the first element of each entry in the "values" array must be the timestamp of the measurement in seconds (with optional decimal precision).
+    * The `<sample_value>` must be the measurement value as a string.
+* metadata: Metadata that is required for the evaluation of the service level objective.
+  * **threshold**: Must be an unsigned integer that specifies the threshold for the SLO evaluation. The SLO is considered fulfilled if the result value is below the threshold. If the result value is equal to or above the threshold, the SLO is considered not fulfilled.
+  * **warmup**: Specifies the warmup time in seconds that is ignored for evaluating the SLO.
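+
+For example, an evaluation can be requested with `curl` (a minimal sketch; `request.json` is a hypothetical file containing a request body structured as shown above):
+
+```sh
+curl -X POST -H "Content-Type: application/json" -d @request.json http://localhost:80/evaluate-slope
+```
+
+The endpoint returns `true` if the SLO is fulfilled and `false` otherwise.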
diff --git a/slo-checker/record-lag/app/main.py b/slo-checker/record-lag/app/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..621fa0cfc9c27e809fd92752de93f2795fa32c05
--- /dev/null
+++ b/slo-checker/record-lag/app/main.py
@@ -0,0 +1,55 @@
+from fastapi import FastAPI, Request
+import trend_slope_computer
+import logging
+import os
+import pandas as pd
+import json
+import sys
+from statistics import median
+
+app = FastAPI()
+
+logging.basicConfig(stream=sys.stdout,
+                    format="%(asctime)s %(levelname)s %(name)s: %(message)s")
+logger = logging.getLogger("API")
+
+
+if os.getenv('LOG_LEVEL') == 'INFO':
+    logger.setLevel(logging.INFO)
+elif os.getenv('LOG_LEVEL') == 'WARNING':
+    logger.setLevel(logging.WARNING)
+elif os.getenv('LOG_LEVEL') == 'DEBUG':
+    logger.setLevel(logging.DEBUG)
+
+def calculate_slope_trend(results, warmup):
+    d = []
+    for result in results:
+        group = result['metric']['group']
+        for value in result['values']:
+            d.append({'group': group, 'timestamp': int(
+                value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
+
+    df = pd.DataFrame(d)
+
+    logger.info("Calculating trend slope with warmup of %s seconds for data frame:\n %s", warmup, df)
+    try:
+        trend_slope = trend_slope_computer.compute(df, warmup)
+    except Exception:
+        logger.exception('Computing trend slope failed.')
+        logger.error('Mark this subexperiment as not successful and continue the benchmark.')
+        return float('inf')
+
+    logger.info("Computed lag trend slope is '%s'", trend_slope)
+    return trend_slope
+
+def check_service_level_objective(results, threshold):
+    return median(results) < threshold
+
+@app.post("/evaluate-slope",response_model=bool)
+async def evaluate_slope(request: Request):
+    data = json.loads(await request.body())
+    results = [calculate_slope_trend(total_lag, data['metadata']['warmup']) for total_lag in data['results']]
+    return check_service_level_objective(results=results, threshold=data['metadata']['threshold'])
+
+logger.info("SLO evaluator is online")
\ No newline at end of file
diff --git a/slo-checker/record-lag/app/test.py b/slo-checker/record-lag/app/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8d81f86b16255dcdce5337d8f00e922b98b4f82
--- /dev/null
+++ b/slo-checker/record-lag/app/test.py
@@ -0,0 +1,30 @@
+import unittest
+from main import app, check_service_level_objective
+import json
+from fastapi.testclient import TestClient
+
+class TestSloEvaluation(unittest.TestCase):
+    client = TestClient(app)
+
+    def test_1_rep(self):
+        with open('../resources/test-1-rep-success.json') as json_file:
+            data = json.load(json_file)
+            response = self.client.post("/evaluate-slope", json=data)
+            self.assertEqual(response.json(), True)
+
+    def test_3_rep(self):
+        with open('../resources/test-3-rep-success.json') as json_file:
+            data = json.load(json_file)
+            response = self.client.post("/evaluate-slope", json=data)
+            self.assertEqual(response.json(), True)
+
+    def test_check_service_level_objective(self):
+        values = [1, 2, 3, 4]
+        self.assertEqual(check_service_level_objective(values, 2), False)
+        self.assertEqual(check_service_level_objective(values, 3), True)
+        values = [1, 2, 3, 4, 5]
+        self.assertEqual(check_service_level_objective(values, 2), False)
+        self.assertEqual(check_service_level_objective(values, 4), True)
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/execution/lib/trend_slope_computer.py b/slo-checker/record-lag/app/trend_slope_computer.py
similarity index 52%
rename from execution/lib/trend_slope_computer.py
rename to slo-checker/record-lag/app/trend_slope_computer.py
index 90ae26cfd275f53307e19532f047e5e0a9326d3a..51b28f2baa5110a6d64f3adc1ac9a94c6b6f3ce9 100644
--- a/execution/lib/trend_slope_computer.py
+++ b/slo-checker/record-lag/app/trend_slope_computer.py
@@ -2,14 +2,12 @@ from sklearn.linear_model import LinearRegression
 import pandas as pd
 import os
 
-def compute(directory, filename, warmup_sec):
-    df = pd.read_csv(os.path.join(directory, filename))
-    input = df
-    input['sec_start'] = input.loc[0:, 'timestamp'] - input.iloc[0]['timestamp']
-    regress = input.loc[input['sec_start'] >= warmup_sec] # Warm-Up
+def compute(data, warmup_sec):
+    data['sec_start'] = data.loc[0:, 'timestamp'] - data.iloc[0]['timestamp']
+    regress = data.loc[data['sec_start'] >= warmup_sec] # Warm-Up
 
-    X = regress.iloc[:, 2].values.reshape(-1, 1)  # values converts it into a numpy array
-    Y = regress.iloc[:, 3].values.reshape(-1, 1)  # -1 means that calculate the dimension of rows, but have 1 column
+    X = regress.iloc[:, 1].values.reshape(-1, 1)  # column 1 (timestamp) as numpy array
+    Y = regress.iloc[:, 2].values.reshape(-1, 1)  # column 2 (value); -1 lets numpy infer the number of rows
     linear_regressor = LinearRegression()  # create object for the class
     linear_regressor.fit(X, Y)  # perform linear regression
     Y_pred = linear_regressor.predict(X)  # make predictions
diff --git a/slo-checker/record-lag/requirements.txt b/slo-checker/record-lag/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b6c3863226c2bd5e8bcd7982b2674dee593f192
--- /dev/null
+++ b/slo-checker/record-lag/requirements.txt
@@ -0,0 +1,5 @@
+fastapi==0.65.2
+scikit-learn==0.20.3
+pandas==1.0.3
+uvicorn
+requests
diff --git a/slo-checker/record-lag/resources/test-1-rep-success.json b/slo-checker/record-lag/resources/test-1-rep-success.json
new file mode 100644
index 0000000000000000000000000000000000000000..dfe11282720ebfcdd60582b7717da892bc85a923
--- /dev/null
+++ b/slo-checker/record-lag/resources/test-1-rep-success.json
@@ -0,0 +1,141 @@
+{
+    "results": [
+        [
+            {
+                "metric": {
+                    "group": "theodolite-uc1-application-0.0.1"
+                },
+                "values": [
+                    [
+                        1.621008960827E9,
+                        "234"
+                    ],
+                    [
+                        1.621008965827E9,
+                        "234"
+                    ],
+                    [
+                        1.621008970827E9,
+                        "234"
+                    ],
+                    [
+                        1.621008975827E9,
+                        "719"
+                    ],
+                    [
+                        1.621008980827E9,
+                        "719"
+                    ],
+                    [
+                        1.621008985827E9,
+                        "719"
+                    ],
+                    [
+                        1.621008990827E9,
+                        "1026"
+                    ],
+                    [
+                        1.621008995827E9,
+                        "1026"
+                    ],
+                    [
+                        1.621009000827E9,
+                        "1026"
+                    ],
+                    [
+                        1.621009005827E9,
+                        "534"
+                    ],
+                    [
+                        1.621009010827E9,
+                        "534"
+                    ],
+                    [
+                        1.621009015827E9,
+                        "534"
+                    ],
+                    [
+                        1.621009020827E9,
+                        "943"
+                    ],
+                    [
+                        1.621009025827E9,
+                        "943"
+                    ],
+                    [
+                        1.621009030827E9,
+                        "943"
+                    ],
+                    [
+                        1.621009035827E9,
+                        "66"
+                    ],
+                    [
+                        1.621009040827E9,
+                        "66"
+                    ],
+                    [
+                        1.621009045827E9,
+                        "66"
+                    ],
+                    [
+                        1.621009050827E9,
+                        "841"
+                    ],
+                    [
+                        1.621009055827E9,
+                        "841"
+                    ],
+                    [
+                        1.621009060827E9,
+                        "841"
+                    ],
+                    [
+                        1.621009065827E9,
+                        "405"
+                    ],
+                    [
+                        1.621009070827E9,
+                        "405"
+                    ],
+                    [
+                        1.621009075827E9,
+                        "405"
+                    ],
+                    [
+                        1.621009080827E9,
+                        "201"
+                    ],
+                    [
+                        1.621009085827E9,
+                        "201"
+                    ],
+                    [
+                        1.621009090827E9,
+                        "201"
+                    ],
+                    [
+                        1.621009095827E9,
+                        "227"
+                    ],
+                    [
+                        1.621009100827E9,
+                        "227"
+                    ],
+                    [
+                        1.621009105827E9,
+                        "227"
+                    ],
+                    [
+                        1.621009110827E9,
+                        "943"
+                    ]
+                ]
+            }
+        ]
+    ],
+    "metadata": {
+        "threshold": 2000,
+        "warmup": 0
+    }
+}
\ No newline at end of file
diff --git a/slo-checker/record-lag/resources/test-3-rep-success.json b/slo-checker/record-lag/resources/test-3-rep-success.json
new file mode 100644
index 0000000000000000000000000000000000000000..cf483f42f3783aecd1f428ac7bbbe2090c4cade0
--- /dev/null
+++ b/slo-checker/record-lag/resources/test-3-rep-success.json
@@ -0,0 +1,291 @@
+{
+    "results": [
+        [
+            {
+                "metric": {
+                    "group": "theodolite-uc1-application-0.0.1"
+                },
+                "values": [
+                    [
+                        1.621012384232E9,
+                        "6073"
+                    ],
+                    [
+                        1.621012389232E9,
+                        "6073"
+                    ],
+                    [
+                        1.621012394232E9,
+                        "6073"
+                    ],
+                    [
+                        1.621012399232E9,
+                        "227"
+                    ],
+                    [
+                        1.621012404232E9,
+                        "227"
+                    ],
+                    [
+                        1.621012409232E9,
+                        "227"
+                    ],
+                    [
+                        1.621012414232E9,
+                        "987"
+                    ],
+                    [
+                        1.621012419232E9,
+                        "987"
+                    ],
+                    [
+                        1.621012424232E9,
+                        "987"
+                    ],
+                    [
+                        1.621012429232E9,
+                        "100"
+                    ],
+                    [
+                        1.621012434232E9,
+                        "100"
+                    ],
+                    [
+                        1.621012439232E9,
+                        "100"
+                    ],
+                    [
+                        1.621012444232E9,
+                        "959"
+                    ],
+                    [
+                        1.621012449232E9,
+                        "959"
+                    ],
+                    [
+                        1.621012454232E9,
+                        "959"
+                    ],
+                    [
+                        1.621012459232E9,
+                        "625"
+                    ],
+                    [
+                        1.621012464232E9,
+                        "625"
+                    ],
+                    [
+                        1.621012469232E9,
+                        "625"
+                    ],
+                    [
+                        1.621012474232E9,
+                        "683"
+                    ],
+                    [
+                        1.621012479232E9,
+                        "683"
+                    ],
+                    [
+                        1.621012484232E9,
+                        "683"
+                    ],
+                    [
+                        1.621012489232E9,
+                        "156"
+                    ]
+                ]
+            }
+        ],
+        [
+            {
+                "metric": {
+                    "group": "theodolite-uc1-application-0.0.1"
+                },
+                "values": [
+                    [
+                        1.621012545211E9,
+                        "446"
+                    ],
+                    [
+                        1.621012550211E9,
+                        "446"
+                    ],
+                    [
+                        1.621012555211E9,
+                        "446"
+                    ],
+                    [
+                        1.621012560211E9,
+                        "801"
+                    ],
+                    [
+                        1.621012565211E9,
+                        "801"
+                    ],
+                    [
+                        1.621012570211E9,
+                        "801"
+                    ],
+                    [
+                        1.621012575211E9,
+                        "773"
+                    ],
+                    [
+                        1.621012580211E9,
+                        "773"
+                    ],
+                    [
+                        1.621012585211E9,
+                        "773"
+                    ],
+                    [
+                        1.621012590211E9,
+                        "509"
+                    ],
+                    [
+                        1.621012595211E9,
+                        "509"
+                    ],
+                    [
+                        1.621012600211E9,
+                        "509"
+                    ],
+                    [
+                        1.621012605211E9,
+                        "736"
+                    ],
+                    [
+                        1.621012610211E9,
+                        "736"
+                    ],
+                    [
+                        1.621012615211E9,
+                        "736"
+                    ],
+                    [
+                        1.621012620211E9,
+                        "903"
+                    ],
+                    [
+                        1.621012625211E9,
+                        "903"
+                    ],
+                    [
+                        1.621012630211E9,
+                        "903"
+                    ],
+                    [
+                        1.621012635211E9,
+                        "512"
+                    ],
+                    [
+                        1.621012640211E9,
+                        "512"
+                    ],
+                    [
+                        1.621012645211E9,
+                        "512"
+                    ]
+                ]
+            }
+        ],
+        [
+            {
+                "metric": {
+                    "group": "theodolite-uc1-application-0.0.1"
+                },
+                "values": [
+                    [
+                        1.621012700748E9,
+                        "6484"
+                    ],
+                    [
+                        1.621012705748E9,
+                        "6484"
+                    ],
+                    [
+                        1.621012710748E9,
+                        "6484"
+                    ],
+                    [
+                        1.621012715748E9,
+                        "505"
+                    ],
+                    [
+                        1.621012720748E9,
+                        "505"
+                    ],
+                    [
+                        1.621012725748E9,
+                        "505"
+                    ],
+                    [
+                        1.621012730748E9,
+                        "103"
+                    ],
+                    [
+                        1.621012735748E9,
+                        "103"
+                    ],
+                    [
+                        1.621012740748E9,
+                        "103"
+                    ],
+                    [
+                        1.621012745748E9,
+                        "201"
+                    ],
+                    [
+                        1.621012750748E9,
+                        "201"
+                    ],
+                    [
+                        1.621012755748E9,
+                        "201"
+                    ],
+                    [
+                        1.621012760748E9,
+                        "965"
+                    ],
+                    [
+                        1.621012765748E9,
+                        "965"
+                    ],
+                    [
+                        1.621012770748E9,
+                        "965"
+                    ],
+                    [
+                        1.621012775748E9,
+                        "876"
+                    ],
+                    [
+                        1.621012780748E9,
+                        "876"
+                    ],
+                    [
+                        1.621012785748E9,
+                        "876"
+                    ],
+                    [
+                        1.621012790748E9,
+                        "380"
+                    ],
+                    [
+                        1.621012795748E9,
+                        "380"
+                    ],
+                    [
+                        1.621012800748E9,
+                        "380"
+                    ]
+                ]
+            }
+        ]
+    ],
+    "metadata": {
+        "threshold": 2000,
+        "warmup": 0
+    }
+}
\ No newline at end of file
diff --git a/benchmarks/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from benchmarks/.settings/org.eclipse.jdt.ui.prefs
rename to theodolite-benchmarks/.settings/org.eclipse.jdt.ui.prefs
diff --git a/benchmarks/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from benchmarks/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to theodolite-benchmarks/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/benchmarks/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from benchmarks/.settings/qa.eclipse.plugin.pmd.prefs
rename to theodolite-benchmarks/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/theodolite-benchmarks/build.gradle b/theodolite-benchmarks/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..cd2ca985538ff84ec6b21aa5dccea86940d8fc6d
--- /dev/null
+++ b/theodolite-benchmarks/build.gradle
@@ -0,0 +1,18 @@
+// Plugins
+allprojects {
+  apply plugin: 'eclipse'
+}
+
+// Check for updates every build
+configurations.all {
+    resolutionStrategy.cacheChangingModulesFor 0, 'seconds'
+}
+
+allprojects {
+  eclipse {
+      classpath {
+         downloadSources=true
+         downloadJavadoc=true
+      }
+  }
+}
diff --git a/theodolite-benchmarks/buildSrc/build.gradle b/theodolite-benchmarks/buildSrc/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..4c099de32dc97ed3aa0417e8fff1f06e2a50dfd8
--- /dev/null
+++ b/theodolite-benchmarks/buildSrc/build.gradle
@@ -0,0 +1,24 @@
+buildscript {
+  repositories {
+    maven {
+      url "https://plugins.gradle.org/m2/"
+    }
+  }
+  dependencies {
+    classpath "com.github.jengelman.gradle.plugins:shadow:6.0.0"
+  }
+}
+
+// to discover the precompiled script plugins
+plugins {
+    id 'groovy-gradle-plugin'
+}
+
+repositories {
+    gradlePluginPortal() // so that external plugins can be resolved in dependencies section
+}
+
+dependencies {
+    implementation 'gradle.plugin.com.github.spotbugs.snom:spotbugs-gradle-plugin:4.6.0'
+    implementation 'com.github.jengelman.gradle.plugins:shadow:6.0.0'
+}
diff --git a/benchmarks/application-kafkastreams-commons/build.gradle b/theodolite-benchmarks/buildSrc/settings.gradle
similarity index 100%
rename from benchmarks/application-kafkastreams-commons/build.gradle
rename to theodolite-benchmarks/buildSrc/settings.gradle
diff --git a/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.flink.gradle b/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.flink.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..333a87bf55bcd0051be05ca91dfe8dc9a2e9e8fa
--- /dev/null
+++ b/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.flink.gradle
@@ -0,0 +1,67 @@
+plugins {
+  id 'theodolite.java-conventions'
+  id 'application' // executable
+  id 'com.github.johnrengelman.shadow' // create fat jar
+}
+
+applicationDefaultJvmArgs = ["-Dlog4j.configuration=log4j.properties"]
+
+
+run.classpath = sourceSets.main.runtimeClasspath
+
+jar {
+    manifest {
+        attributes 'Built-By': System.getProperty('user.name'),
+                   'Build-Jdk': System.getProperty('java.version')
+    }
+}
+
+shadowJar {
+    configurations = [project.configurations.compile]
+    zip64 true
+}
+
+tasks.distZip.enabled = false
+
+ext {
+  flinkVersion = '1.12.2'
+  scalaBinaryVersion = '2.12'
+}
+
+repositories {
+  jcenter()
+  maven {
+    url "https://oss.sonatype.org/content/repositories/snapshots/"
+  }
+  maven {
+    url 'https://packages.confluent.io/maven/'
+  }
+}
+
+dependencies {
+    // Special version required because of https://issues.apache.org/jira/browse/FLINK-13703
+    implementation('org.industrial-devops:titan-ccp-common:0.1.0-flink-ready-SNAPSHOT') { changing = true }
+    implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
+
+    // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
+    implementation 'org.apache.kafka:kafka-clients:2.2.0'
+    implementation 'com.google.guava:guava:24.1-jre'
+    implementation 'com.google.code.gson:gson:2.8.2'
+    implementation 'org.slf4j:slf4j-simple:1.6.1'
+    implementation project(':flink-commons')
+
+    //compile group: 'org.apache.kafka', name: 'kafka-clients', version: "2.2.0"
+    implementation "org.apache.flink:flink-java:${flinkVersion}"
+    implementation "org.apache.flink:flink-streaming-java_${scalaBinaryVersion}:${flinkVersion}"
+    implementation "org.apache.flink:flink-table-api-java-bridge_${scalaBinaryVersion}:${flinkVersion}"
+    implementation "org.apache.flink:flink-table-planner-blink_${scalaBinaryVersion}:${flinkVersion}"
+    implementation "org.apache.flink:flink-connector-kafka_${scalaBinaryVersion}:${flinkVersion}"
+    implementation "org.apache.flink:flink-avro:${flinkVersion}"
+    implementation "org.apache.flink:flink-avro-confluent-registry:${flinkVersion}"
+    implementation "org.apache.flink:flink-runtime-web_${scalaBinaryVersion}:${flinkVersion}" // For debugging
+    implementation "org.apache.flink:flink-statebackend-rocksdb_${scalaBinaryVersion}:${flinkVersion}"
+    implementation "org.apache.flink:flink-metrics-prometheus_${scalaBinaryVersion}:${flinkVersion}"
+
+    // Use JUnit test framework
+    testImplementation 'junit:junit:4.12'
+}
diff --git a/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.java-commons.gradle b/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.java-commons.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..f195d6e117d29cad7a6d7494835626f92fb1c2b0
--- /dev/null
+++ b/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.java-commons.gradle
@@ -0,0 +1,7 @@
+plugins {
+  // common java conventions
+  id 'theodolite.java-conventions'
+
+  // provide library capability in commons
+  id 'java-library'
+}
diff --git a/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.java-conventions.gradle b/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.java-conventions.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..773872648edfd4b30218a99d307b6e7c45ed3470
--- /dev/null
+++ b/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.java-conventions.gradle
@@ -0,0 +1,70 @@
+plugins {
+  id 'java'
+  id 'checkstyle'
+  id 'pmd'
+
+  // NOTE: external plugin version is specified in implementation dependency artifact of the project's build file
+  id 'com.github.spotbugs'
+}
+
+java {
+  sourceCompatibility = JavaVersion.VERSION_11
+  targetCompatibility = JavaVersion.VERSION_11
+}
+
+// Per default XML reports for SpotBugs are generated
+// Include this to generate HTML reports
+tasks.withType(com.github.spotbugs.snom.SpotBugsTask) {
+  reports {
+    // Either HTML or XML reports can be activated
+    html.enabled true
+    xml.enabled false
+  }
+}
+
+task pmd {
+  group 'Quality Assurance'
+  description 'Run PMD'
+
+  dependsOn 'pmdMain'
+  dependsOn 'pmdTest'
+}
+
+task checkstyle {
+  group 'Quality Assurance'
+  description 'Run Checkstyle'
+
+  dependsOn 'checkstyleMain'
+  dependsOn 'checkstyleTest'
+}
+
+task spotbugs {
+  group 'Quality Assurance'
+  description 'Run SpotBugs'
+
+  dependsOn 'spotbugsMain'
+  dependsOn 'spotbugsTest'
+}
+
+pmd {
+  ruleSets = [] // Gradle requires the rule sets to be cleared first
+  ruleSetFiles = files("$rootProject.projectDir/config/pmd.xml")
+  ignoreFailures = false
+  toolVersion = "6.7.0"
+}
+
+checkstyle {
+  configDirectory = file("$rootProject.projectDir/config")
+  configFile = file("$rootProject.projectDir/config/checkstyle.xml")
+  maxWarnings = 0
+  ignoreFailures = false
+  toolVersion = "8.12"
+}
+
+spotbugs {
+  excludeFilter = file("$rootProject.projectDir/config/spotbugs-exclude-filter.xml")
+  reportLevel = "low"
+  effort = "max"
+  ignoreFailures = false
+  toolVersion = '4.1.4'
+}
diff --git a/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.kstreams.gradle b/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.kstreams.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..eece7b835ae9d6f39283ea371ce8b0b8194cdaa0
--- /dev/null
+++ b/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.kstreams.gradle
@@ -0,0 +1,33 @@
+plugins {
+  // common java conventions
+  id 'theodolite.java-conventions'
+
+  // make executable
+  id 'application'
+}
+
+tasks.distZip.enabled = false
+
+repositories {
+  jcenter()
+  maven {
+    url "https://oss.sonatype.org/content/repositories/snapshots/"
+  }
+  maven {
+      url 'https://packages.confluent.io/maven/'
+  }
+}
+
+dependencies {
+    // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
+    implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
+    implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
+    implementation 'org.apache.kafka:kafka-streams:2.6.0' // enable TransformerSuppliers
+    implementation 'com.google.code.gson:gson:2.8.2'
+    implementation 'com.google.guava:guava:24.1-jre'
+    implementation 'org.slf4j:slf4j-simple:1.7.25'
+    implementation project(':kstreams-commons')
+
+    // Use JUnit test framework
+    testImplementation 'junit:junit:4.12'
+}
diff --git a/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.load-generator.gradle b/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.load-generator.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..c6c2b6057cf35c32faa4d67b6ea6dba9e5c13beb
--- /dev/null
+++ b/theodolite-benchmarks/buildSrc/src/main/groovy/theodolite.load-generator.gradle
@@ -0,0 +1,32 @@
+plugins {
+  // common java conventions
+  id 'theodolite.java-conventions'
+
+  // make executable
+  id 'application'
+}
+
+tasks.distZip.enabled = false
+
+repositories {
+  jcenter()
+  maven {
+    url "https://oss.sonatype.org/content/repositories/snapshots/"
+  }
+  maven {
+      url 'https://packages.confluent.io/maven/'
+  }
+}
+
+dependencies {
+  // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
+  implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
+  implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
+  implementation 'org.slf4j:slf4j-simple:1.7.25'
+
+  // These dependencies are used for the load-generator-commons
+  implementation project(':load-generator-commons')
+
+  // Use JUnit test framework
+  testImplementation 'junit:junit:4.12'
+}
diff --git a/benchmarks/config/README.md b/theodolite-benchmarks/config/README.md
similarity index 100%
rename from benchmarks/config/README.md
rename to theodolite-benchmarks/config/README.md
diff --git a/benchmarks/config/checkstyle-suppression.xml b/theodolite-benchmarks/config/checkstyle-suppression.xml
similarity index 100%
rename from benchmarks/config/checkstyle-suppression.xml
rename to theodolite-benchmarks/config/checkstyle-suppression.xml
diff --git a/benchmarks/config/checkstyle.xml b/theodolite-benchmarks/config/checkstyle.xml
similarity index 100%
rename from benchmarks/config/checkstyle.xml
rename to theodolite-benchmarks/config/checkstyle.xml
diff --git a/benchmarks/config/eclipse-cleanup.xml b/theodolite-benchmarks/config/eclipse-cleanup.xml
similarity index 100%
rename from benchmarks/config/eclipse-cleanup.xml
rename to theodolite-benchmarks/config/eclipse-cleanup.xml
diff --git a/benchmarks/config/eclipse-formatter.xml b/theodolite-benchmarks/config/eclipse-formatter.xml
similarity index 100%
rename from benchmarks/config/eclipse-formatter.xml
rename to theodolite-benchmarks/config/eclipse-formatter.xml
diff --git a/benchmarks/config/eclipse-import-order.importorder b/theodolite-benchmarks/config/eclipse-import-order.importorder
similarity index 100%
rename from benchmarks/config/eclipse-import-order.importorder
rename to theodolite-benchmarks/config/eclipse-import-order.importorder
diff --git a/benchmarks/config/pmd.xml b/theodolite-benchmarks/config/pmd.xml
similarity index 100%
rename from benchmarks/config/pmd.xml
rename to theodolite-benchmarks/config/pmd.xml
diff --git a/benchmarks/config/spotbugs-exclude-filter.xml b/theodolite-benchmarks/config/spotbugs-exclude-filter.xml
similarity index 100%
rename from benchmarks/config/spotbugs-exclude-filter.xml
rename to theodolite-benchmarks/config/spotbugs-exclude-filter.xml
diff --git a/theodolite-benchmarks/definitions/install-configmaps.sh b/theodolite-benchmarks/definitions/install-configmaps.sh
new file mode 100755
index 0000000000000000000000000000000000000000..841a293bbb77c4960a2532a13a009a42227223d3
--- /dev/null
+++ b/theodolite-benchmarks/definitions/install-configmaps.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+# Flink
+kubectl create configmap benchmark-resources-uc1-flink --from-file uc1-flink/resources
+kubectl create configmap benchmark-resources-uc2-flink --from-file uc2-flink/resources
+kubectl create configmap benchmark-resources-uc3-flink --from-file uc3-flink/resources
+kubectl create configmap benchmark-resources-uc4-flink --from-file uc4-flink/resources
+
+# Kafka Streams
+kubectl create configmap benchmark-resources-uc1-kstreams --from-file uc1-kstreams/resources
+kubectl create configmap benchmark-resources-uc2-kstreams --from-file uc2-kstreams/resources
+kubectl create configmap benchmark-resources-uc3-kstreams --from-file uc3-kstreams/resources
+kubectl create configmap benchmark-resources-uc4-kstreams --from-file uc4-kstreams/resources
+
+# Load Generator
+kubectl create configmap benchmark-resources-uc1-load-generator --from-file uc1-load-generator/resources
+kubectl create configmap benchmark-resources-uc2-load-generator --from-file uc2-load-generator/resources
+kubectl create configmap benchmark-resources-uc3-load-generator --from-file uc3-load-generator/resources
+kubectl create configmap benchmark-resources-uc4-load-generator --from-file uc4-load-generator/resources
diff --git a/theodolite-benchmarks/definitions/uc1-flink/resources/flink-configuration-configmap.yaml b/theodolite-benchmarks/definitions/uc1-flink/resources/flink-configuration-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..36178e2bebdac96b8648bd6c299009aa49d3fff6
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/resources/flink-configuration-configmap.yaml
@@ -0,0 +1,66 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: flink-config
+  labels:
+    app: flink
+data:
+  flink-conf.yaml: |+
+    jobmanager.rpc.address: flink-jobmanager
+    taskmanager.numberOfTaskSlots: 1 #TODO
+    #blob.server.port: 6124
+    #jobmanager.rpc.port: 6123
+    #taskmanager.rpc.port: 6122
+    #queryable-state.proxy.ports: 6125
+    #jobmanager.memory.process.size: 4Gb
+    #taskmanager.memory.process.size: 4Gb
+    #parallelism.default: 1 #TODO
+    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
+    metrics.reporter.prom.interval: 10 SECONDS
+    taskmanager.network.detailed-metrics: true
+  # -> gives metrics about inbound/outbound network queue lengths
+  log4j-console.properties: |+
+    # This affects logging for both user code and Flink
+    rootLogger.level = INFO
+    rootLogger.appenderRef.console.ref = ConsoleAppender
+    rootLogger.appenderRef.rolling.ref = RollingFileAppender
+
+    # Uncomment this if you want to _only_ change Flink's logging
+    #logger.flink.name = org.apache.flink
+    #logger.flink.level = INFO
+
+    # The following lines keep the log level of common libraries/connectors on
+    # log level INFO. The root logger does not override this. You have to manually
+    # change the log levels here.
+    logger.akka.name = akka
+    logger.akka.level = INFO
+    logger.kafka.name= org.apache.kafka
+    logger.kafka.level = INFO
+    logger.hadoop.name = org.apache.hadoop
+    logger.hadoop.level = INFO
+    logger.zookeeper.name = org.apache.zookeeper
+    logger.zookeeper.level = INFO
+
+    # Log all infos to the console
+    appender.console.name = ConsoleAppender
+    appender.console.type = CONSOLE
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+
+    # Log all infos in the given rolling file
+    appender.rolling.name = RollingFileAppender
+    appender.rolling.type = RollingFile
+    appender.rolling.append = false
+    appender.rolling.fileName = ${sys:log.file}
+    appender.rolling.filePattern = ${sys:log.file}.%i
+    appender.rolling.layout.type = PatternLayout
+    appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+    appender.rolling.policies.type = Policies
+    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+    appender.rolling.policies.size.size=100MB
+    appender.rolling.strategy.type = DefaultRolloverStrategy
+    appender.rolling.strategy.max = 10
+
+    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
+    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+    logger.netty.level = OFF
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc1-flink/resources/jobmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc1-flink/resources/jobmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1f328b1cd553c8036e570d28b97795fb2b00ec81
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/resources/jobmanager-deployment.yaml
@@ -0,0 +1,94 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-jobmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: jobmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: jobmanager
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: jobmanager
+          image: ghcr.io/cau-se/theodolite-uc1-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["standalone-job", "--job-classname", "theodolite.uc1.application.HistoryServiceFlinkJob"] # optional arguments: ["--job-id", "<job id>", "--fromSavepoint", "/path/to/savepoint", "--allowNonRestoredState"]
+          #command: ['sleep', '60m']
+          ports:
+            - containerPort: 6123
+              name: rpc
+            - containerPort: 6124
+              name: blob-server
+            - containerPort: 8081
+              name: webui
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6123
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf
+#            - name: job-artifacts-volume
+#              mountPath: /opt/flink/usrlib
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-jobmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
+#        - name: job-artifacts-volume
+#          hostPath:
+#            path: /host/path/to/job/artifacts
diff --git a/theodolite-benchmarks/definitions/uc1-flink/resources/jobmanager-rest-service.yaml b/theodolite-benchmarks/definitions/uc1-flink/resources/jobmanager-rest-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d74aaf7f625c6922e2e1b4f20c19e50a39b68ac
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/resources/jobmanager-rest-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager-rest
+spec:
+  type: NodePort
+  ports:
+    - name: rest
+      port: 8081
+      targetPort: 8081
+      nodePort: 30081
+  selector:
+    app: flink
+    component: jobmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc1-flink/resources/jobmanager-service.yaml b/theodolite-benchmarks/definitions/uc1-flink/resources/jobmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e2ff5d9898eb1ebf5db9a827472a47514ab1473c
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/resources/jobmanager-service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: rpc
+      port: 6123
+    - name: blob-server
+      port: 6124
+    - name: webui
+      port: 8081
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: jobmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc1-flink/resources/service-monitor.yaml b/theodolite-benchmarks/definitions/uc1-flink/resources/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..02f78823c627e27ddfe1db5eac3f6a7f7a7f1bf8
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/resources/service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: flink
+    appScope: titan-ccp
+  name: flink
+spec:
+  selector:
+    matchLabels:
+        app: flink
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-benchmarks/definitions/uc1-flink/resources/taskmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc1-flink/resources/taskmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c2266a4aeb21302262279f147e6512d5264e1dc1
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/resources/taskmanager-deployment.yaml
@@ -0,0 +1,88 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-taskmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: taskmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: taskmanager
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: taskmanager
+          image: ghcr.io/cau-se/theodolite-uc1-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: TASK_MANAGER_NUMBER_OF_TASK_SLOTS
+              value: "1" #TODO
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["taskmanager"]
+          ports:
+            - containerPort: 6122
+              name: rpc
+            - containerPort: 6125
+              name: query-state
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6122
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf/
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-taskmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
diff --git a/theodolite-benchmarks/definitions/uc1-flink/resources/taskmanager-service.yaml b/theodolite-benchmarks/definitions/uc1-flink/resources/taskmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a2e27f64af1cfd1a26da142b8a50bb41c8ba5fcb
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/resources/taskmanager-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-taskmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: taskmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc1-flink/uc1-flink-benchmark-operator.yaml b/theodolite-benchmarks/definitions/uc1-flink/uc1-flink-benchmark-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..89bac41ee5c8dcefa628b3cb01052df5a1df9292
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/uc1-flink-benchmark-operator.yaml
@@ -0,0 +1,57 @@
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: uc1-flink
+spec:
+  sut:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc1-flink"
+          files:
+          - "flink-configuration-configmap.yaml"
+          - "taskmanager-deployment.yaml"
+          - "taskmanager-service.yaml"
+          - "service-monitor.yaml"
+          - "jobmanager-service.yaml"
+          - "jobmanager-deployment.yaml"
+          #- "jobmanager-rest-service.yaml"
+  loadGenerator:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc1-load-generator"
+          files:
+          - "uc1-load-generator-deployment.yaml"
+          - "uc1-load-generator-service.yaml"
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "taskmanager-deployment.yaml"
+        - type: "EnvVarPatcher"
+          resource: "jobmanager-deployment.yaml"
+          properties:
+            container: "jobmanager"
+            variableName: "PARALLELISM"
+        - type: "EnvVarPatcher" # required?
+          resource: "taskmanager-deployment.yaml"
+          properties:
+            container: "taskmanager"
+            variableName: "PARALLELISM"
+  loadTypes:
+    - typeName: "NumSensors"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc1-load-generator-deployment.yaml"
+          properties:
+            container: "workload-generator"
+            variableName: "NUM_SENSORS"
+        - type: "NumSensorsLoadGeneratorReplicaPatcher"
+          resource: "uc1-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "150000"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
diff --git a/execution/uc-application/jmx-configmap.yaml b/theodolite-benchmarks/definitions/uc1-kstreams/resources/uc1-jmx-configmap.yaml
similarity index 100%
rename from execution/uc-application/jmx-configmap.yaml
rename to theodolite-benchmarks/definitions/uc1-kstreams/resources/uc1-jmx-configmap.yaml
diff --git a/theodolite-benchmarks/definitions/uc1-kstreams/resources/uc1-kstreams-deployment.yaml b/theodolite-benchmarks/definitions/uc1-kstreams/resources/uc1-kstreams-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..171c3446db2719ee91bd8954233015316851fcf9
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-kstreams/resources/uc1-kstreams-deployment.yaml
@@ -0,0 +1,57 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
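+        # Sidecar that connects to the Kafka Streams application via JMX (port 5555)
+        # and exposes its metrics to Prometheus on port 5556.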
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
diff --git a/execution/uc-application/aggregation-service.yaml b/theodolite-benchmarks/definitions/uc1-kstreams/resources/uc1-kstreams-service.yaml
similarity index 100%
rename from execution/uc-application/aggregation-service.yaml
rename to theodolite-benchmarks/definitions/uc1-kstreams/resources/uc1-kstreams-service.yaml
diff --git a/execution/uc-application/service-monitor.yaml b/theodolite-benchmarks/definitions/uc1-kstreams/resources/uc1-service-monitor.yaml
similarity index 100%
rename from execution/uc-application/service-monitor.yaml
rename to theodolite-benchmarks/definitions/uc1-kstreams/resources/uc1-service-monitor.yaml
diff --git a/theodolite-benchmarks/definitions/uc1-kstreams/uc1-kstreams-benchmark-operator.yaml b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-kstreams-benchmark-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fb5557c2df8b483164d3c1000717db4c7cface81
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-kstreams-benchmark-operator.yaml
@@ -0,0 +1,46 @@
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: uc1-kstreams
+spec:
+  sut:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc1-kstreams"
+          files:
+          - "uc1-kstreams-deployment.yaml"
+          - "uc1-kstreams-service.yaml"
+          - "uc1-jmx-configmap.yaml"
+          - "uc1-service-monitor.yaml"
+  loadGenerator:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc1-load-generator"
+          files:
+          - "uc1-load-generator-deployment.yaml"
+          - "uc1-load-generator-service.yaml"
+  resourceTypes:
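+    # Kafka Streams scales via the replica count alone, so a single ReplicaPatcher
+    # suffices (no parallelism variable as in the Flink benchmarks).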
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "uc1-kstreams-deployment.yaml"
+  loadTypes:
+    - typeName: "NumSensors"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc1-load-generator-deployment.yaml"
+          properties:
+            container: "workload-generator"
+            variableName: "NUM_SENSORS"
+        - type: "NumSensorsLoadGeneratorReplicaPatcher"
+          resource: "uc1-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "150000"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
diff --git a/theodolite-benchmarks/definitions/uc1-kstreams/uc1-kstreams-benchmark-standalone.yaml b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-kstreams-benchmark-standalone.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5aaf87e724a4e8c728c3c15b998cb927ff57f3d5
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-kstreams-benchmark-standalone.yaml
@@ -0,0 +1,42 @@
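+# Same benchmark as the operator variant, expressed in the flat format used by
+# Theodolite's standalone mode.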
+name: "uc1-kstreams"
+sut:
+  resources:
+    - configMap:
+        name: "benchmark-resources-uc1-kstreams"
+        files:
+        - "uc1-kstreams-deployment.yaml"
+        - "uc1-kstreams-service.yaml"
+        - "uc1-jmx-configmap.yaml"
+        - "uc1-service-monitor.yaml"
+loadGenerator:
+  resources:
+    - configMap:
+        name: "benchmark-resources-uc1-load-generator"
+        files:
+        - "uc1-load-generator-deployment.yaml"
+        - "uc1-load-generator-service.yaml"
+resourceTypes:
+  - typeName: "Instances"
+    patchers:
+      - type: "ReplicaPatcher"
+        resource: "uc1-kstreams-deployment.yaml"
+loadTypes:
+  - typeName: "NumSensors"
+    patchers:
+      - type: "EnvVarPatcher"
+        resource: "uc1-load-generator-deployment.yaml"
+        properties:
+          container: "workload-generator"
+          variableName: "NUM_SENSORS"
+      - type: "NumSensorsLoadGeneratorReplicaPatcher"
+        resource: "uc1-load-generator-deployment.yaml"
+        properties:
+          loadGenMaxRecords: "150000"
+kafkaConfig:
+  bootstrapServer: "theodolite-cp-kafka:9092"
+  topics:
+    - name: "input"
+      numPartitions: 40
+      replicationFactor: 1
diff --git a/theodolite-benchmarks/definitions/uc1-load-generator/resources/uc1-load-generator-deployment.yaml b/theodolite-benchmarks/definitions/uc1-load-generator/resources/uc1-load-generator-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9f9ccc6ae39407bb1f027e1e23cb152944b869e0
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-load-generator/resources/uc1-load-generator-deployment.yaml
@@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-load-generator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: workload-generator
+          image: ghcr.io/cau-se/theodolite-uc1-workload-generator:latest
+          ports:
+            - containerPort: 5701
+              name: coordination
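+          # Load generator replicas coordinate through this port; 5701 is the
+          # default port of Hazelcast.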
+          env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: KUBERNETES_DNS_NAME
+              value: "titan-ccp-load-generator.$(KUBERNETES_NAMESPACE).svc.cluster.local"
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
diff --git a/execution/uc-workload-generator/load-generator-service.yaml b/theodolite-benchmarks/definitions/uc1-load-generator/resources/uc1-load-generator-service.yaml
similarity index 71%
rename from execution/uc-workload-generator/load-generator-service.yaml
rename to theodolite-benchmarks/definitions/uc1-load-generator/resources/uc1-load-generator-service.yaml
index c1299e373009dee5fa4cc87093ebc684c7f2e333..f8b26b3f6dece427f9c1ad4db94e351b042749b3 100644
--- a/execution/uc-workload-generator/load-generator-service.yaml
+++ b/theodolite-benchmarks/definitions/uc1-load-generator/resources/uc1-load-generator-service.yaml
@@ -10,7 +10,7 @@ spec:
   selector:
     app: titan-ccp-load-generator
   ports:
-  - name: coordination
-    port: 5701
-    targetPort: 5701
-    protocol: TCP
+    - name: coordination
+      port: 5701
+      targetPort: 5701
+      protocol: TCP
diff --git a/theodolite-benchmarks/definitions/uc2-flink/resources/flink-configuration-configmap.yaml b/theodolite-benchmarks/definitions/uc2-flink/resources/flink-configuration-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..321541f6ac8715b8546b964d8ad2b7c28552fbcd
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/resources/flink-configuration-configmap.yaml
@@ -0,0 +1,68 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: flink-config
+  labels:
+    app: flink
+data:
+  flink-conf.yaml: |+
+    #jobmanager.rpc.address: flink-jobmanager
+    #taskmanager.numberOfTaskSlots: 1 #TODO
+    #blob.server.port: 6124
+    #jobmanager.rpc.port: 6123
+    #taskmanager.rpc.port: 6122
+    #queryable-state.proxy.ports: 6125
+    #jobmanager.memory.process.size: 4Gb
+    #taskmanager.memory.process.size: 4Gb
+    #parallelism.default: 1 #TODO
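+    # The commented-out settings above are instead passed to the containers via the
+    # FLINK_PROPERTIES environment variable; only the metrics setup is configured here.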
+    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
+    metrics.reporter.prom.interval: 10 SECONDS
+    taskmanager.network.detailed-metrics: true
+  # -> gives metrics about inbound/outbound network queue lengths
+  log4j-console.properties: |+
+    # This affects logging for both user code and Flink
+    rootLogger.level = INFO
+    rootLogger.appenderRef.console.ref = ConsoleAppender
+    rootLogger.appenderRef.rolling.ref = RollingFileAppender
+
+    # Uncomment this if you want to _only_ change Flink's logging
+    #logger.flink.name = org.apache.flink
+    #logger.flink.level = INFO
+
+    # The following lines keep the log level of common libraries/connectors on
+    # log level INFO. The root logger does not override this. You have to manually
+    # change the log levels here.
+    logger.akka.name = akka
+    logger.akka.level = INFO
+    logger.kafka.name = org.apache.kafka
+    logger.kafka.level = INFO
+    logger.hadoop.name = org.apache.hadoop
+    logger.hadoop.level = INFO
+    logger.zookeeper.name = org.apache.zookeeper
+    logger.zookeeper.level = INFO
+
+    # Log all infos to the console
+    appender.console.name = ConsoleAppender
+    appender.console.type = CONSOLE
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+
+    # Log all infos in the given rolling file
+    appender.rolling.name = RollingFileAppender
+    appender.rolling.type = RollingFile
+    appender.rolling.append = false
+    appender.rolling.fileName = ${sys:log.file}
+    appender.rolling.filePattern = ${sys:log.file}.%i
+    appender.rolling.layout.type = PatternLayout
+    appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+    appender.rolling.policies.type = Policies
+    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+    appender.rolling.policies.size.size = 100MB
+    appender.rolling.strategy.type = DefaultRolloverStrategy
+    appender.rolling.strategy.max = 10
+
+    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
+    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+    logger.netty.level = OFF
diff --git a/theodolite-benchmarks/definitions/uc2-flink/resources/jobmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc2-flink/resources/jobmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..87ea174f71c592bbffab4e5fc9ce6e3963596b9c
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/resources/jobmanager-deployment.yaml
@@ -0,0 +1,96 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-jobmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: jobmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: jobmanager
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: jobmanager
+          image: ghcr.io/cau-se/theodolite-uc2-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["standalone-job", "--job-classname", "theodolite.uc2.application.HistoryServiceFlinkJob"] # optional arguments: ["--job-id", "<job id>", "--fromSavepoint", "/path/to/savepoint", "--allowNonRestoredState"]
+          #command: ['sleep', '60m']
+          ports:
+            - containerPort: 6123
+              name: rpc
+            - containerPort: 6124
+              name: blob-server
+            - containerPort: 8081
+              name: webui
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6123
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf
+#            - name: job-artifacts-volume
+#              mountPath: /opt/flink/usrlib
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
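+      # The ConfigMap volume is read-only, so an init container copies its contents
+      # into a writable emptyDir that Flink's entrypoint can modify at startup.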
+      initContainers:
+        - name: init-jobmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
+#        - name: job-artifacts-volume
+#          hostPath:
+#            path: /host/path/to/job/artifacts
diff --git a/theodolite-benchmarks/definitions/uc2-flink/resources/jobmanager-rest-service.yaml b/theodolite-benchmarks/definitions/uc2-flink/resources/jobmanager-rest-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d74aaf7f625c6922e2e1b4f20c19e50a39b68ac
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/resources/jobmanager-rest-service.yaml
@@ -0,0 +1,16 @@
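+# Optional NodePort service for accessing the Flink Web UI from outside the cluster;
+# commented out in the benchmark definition and thus not deployed by default.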
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager-rest
+spec:
+  type: NodePort
+  ports:
+    - name: rest
+      port: 8081
+      targetPort: 8081
+      nodePort: 30081
+  selector:
+    app: flink
+    component: jobmanager
diff --git a/theodolite-benchmarks/definitions/uc2-flink/resources/jobmanager-service.yaml b/theodolite-benchmarks/definitions/uc2-flink/resources/jobmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e2ff5d9898eb1ebf5db9a827472a47514ab1473c
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/resources/jobmanager-service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: rpc
+      port: 6123
+    - name: blob-server
+      port: 6124
+    - name: webui
+      port: 8081
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: jobmanager
diff --git a/theodolite-benchmarks/definitions/uc2-flink/resources/service-monitor.yaml b/theodolite-benchmarks/definitions/uc2-flink/resources/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..02f78823c627e27ddfe1db5eac3f6a7f7a7f1bf8
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/resources/service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: flink
+    appScope: titan-ccp
+  name: flink
+spec:
+  selector:
+    matchLabels:
+      app: flink
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-benchmarks/definitions/uc2-flink/resources/taskmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc2-flink/resources/taskmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c37df972a334a4a0e27f0420030f99f1dff15b53
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/resources/taskmanager-deployment.yaml
@@ -0,0 +1,88 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-taskmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: taskmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: taskmanager
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: taskmanager
+          image: ghcr.io/cau-se/theodolite-uc2-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: TASK_MANAGER_NUMBER_OF_TASK_SLOTS
+              value: "1" #TODO
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["taskmanager"]
+          ports:
+            - containerPort: 6122
+              name: rpc
+            - containerPort: 6125
+              name: query-state
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6122
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf/
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-taskmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
diff --git a/theodolite-benchmarks/definitions/uc2-flink/resources/taskmanager-service.yaml b/theodolite-benchmarks/definitions/uc2-flink/resources/taskmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a2e27f64af1cfd1a26da142b8a50bb41c8ba5fcb
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/resources/taskmanager-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-taskmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: taskmanager
diff --git a/theodolite-benchmarks/definitions/uc2-flink/uc2-flink-benchmark-operator.yaml b/theodolite-benchmarks/definitions/uc2-flink/uc2-flink-benchmark-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..206fbf9683659fcc074341d7077da04c36909b75
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/uc2-flink-benchmark-operator.yaml
@@ -0,0 +1,62 @@
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: uc2-flink
+spec:
+  sut:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc2-flink"
+          files:
+            - "flink-configuration-configmap.yaml"
+            - "taskmanager-deployment.yaml"
+            - "taskmanager-service.yaml"
+            - "service-monitor.yaml"
+            - "jobmanager-service.yaml"
+            - "jobmanager-deployment.yaml"
+            #- "jobmanager-rest-service.yaml"
+  loadGenerator:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc2-load-generator"
+          files:
+            - "uc2-load-generator-deployment.yaml"
+            - "uc2-load-generator-service.yaml"
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "taskmanager-deployment.yaml"
+        - type: "EnvVarPatcher"
+          resource: "jobmanager-deployment.yaml"
+          properties:
+            container: "jobmanager"
+            variableName: "PARALLELISM"
+        - type: "EnvVarPatcher" # required?
+          resource: "taskmanager-deployment.yaml"
+          properties:
+            container: "taskmanager"
+            variableName: "PARALLELISM"
+  loadTypes:
+    - typeName: "NumSensors"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc2-load-generator-deployment.yaml"
+          properties:
+            container: "workload-generator"
+            variableName: "NUM_SENSORS"
+        - type: "NumSensorsLoadGeneratorReplicaPatcher"
+          resource: "uc2-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "150000"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "output"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "theodolite-.*"
+        removeOnly: true
diff --git a/theodolite-benchmarks/definitions/uc2-kstreams/resources/uc2-jmx-configmap.yaml b/theodolite-benchmarks/definitions/uc2-kstreams/resources/uc2-jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..78496a86b1242a89b9e844ead3e700fd0b9a9667
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-kstreams/resources/uc2-jmx-configmap.yaml
@@ -0,0 +1,12 @@
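+# Configures the JMX exporter sidecar: it connects to the Kafka Streams application
+# via JMX on localhost:5555 and re-exposes the metrics for Prometheus.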
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aggregation-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false
diff --git a/theodolite-benchmarks/definitions/uc2-kstreams/resources/uc2-kstreams-deployment.yaml b/theodolite-benchmarks/definitions/uc2-kstreams/resources/uc2-kstreams-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e07bb3f9e536655712c06a004c5d1fb60ffa67e0
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-kstreams/resources/uc2-kstreams-deployment.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc2-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
diff --git a/theodolite-benchmarks/definitions/uc2-kstreams/resources/uc2-kstreams-service.yaml b/theodolite-benchmarks/definitions/uc2-kstreams/resources/uc2-kstreams-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85432d04f225c30469f3232153ef6bd72bd02bdf
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-kstreams/resources/uc2-kstreams-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-aggregation
+  labels:
+    app: titan-ccp-aggregation
+spec:
+  #type: NodePort
+  selector:
+    app: titan-ccp-aggregation
+  ports:
+  - name: http
+    port: 80
+    targetPort: 80
+    protocol: TCP
+  - name: metrics
+    port: 5556
diff --git a/theodolite-benchmarks/definitions/uc2-kstreams/resources/uc2-service-monitor.yaml b/theodolite-benchmarks/definitions/uc2-kstreams/resources/uc2-service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e7e758cacb5086305efa26292ddef2afc958096
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-kstreams/resources/uc2-service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: titan-ccp-aggregation
+    appScope: titan-ccp
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-benchmarks/definitions/uc2-kstreams/uc2-kstreams-benchmark-operator.yaml b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-kstreams-benchmark-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0db22fa95f46d1cb484fa1a7730b8b6801dac67c
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-kstreams-benchmark-operator.yaml
@@ -0,0 +1,49 @@
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: uc2-kstreams
+spec:
+  sut:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc2-kstreams"
+          files:
+            - "uc2-kstreams-deployment.yaml"
+            - "uc2-kstreams-service.yaml"
+            - "uc2-jmx-configmap.yaml"
+            - "uc2-service-monitor.yaml"
+  loadGenerator:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc2-load-generator"
+          files:
+            - "uc2-load-generator-deployment.yaml"
+            - "uc2-load-generator-service.yaml"
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "uc2-kstreams-deployment.yaml"
+  loadTypes:
+    - typeName: "NumSensors"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc2-load-generator-deployment.yaml"
+          properties:
+            container: "workload-generator"
+            variableName: "NUM_SENSORS"
+        - type: "NumSensorsLoadGeneratorReplicaPatcher"
+          resource: "uc2-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "150000"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "output"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "theodolite-.*"
+        removeOnly: true
diff --git a/theodolite-benchmarks/definitions/uc2-kstreams/uc2-kstreams-benchmark-standalone.yaml b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-kstreams-benchmark-standalone.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..67376d76bf0a7cc4cd47563a1d8da8dc0aa3b944
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-kstreams-benchmark-standalone.yaml
@@ -0,0 +1,45 @@
+name: "uc2-kstreams"
+sut:
+  resources:
+    - configMap:
+        name: "benchmark-resources-uc2-kstreams"
+        files:
+          - "uc2-kstreams-deployment.yaml"
+          - "uc2-kstreams-service.yaml"
+          - "uc2-jmx-configmap.yaml"
+          - "uc2-service-monitor.yaml"
+loadGenerator:
+  resources:
+    - configMap:
+        name: "benchmark-resources-uc2-load-generator"
+        files:
+          - "uc2-load-generator-deployment.yaml"
+          - "uc2-load-generator-service.yaml"
+resourceTypes:
+  - typeName: "Instances"
+    patchers:
+      - type: "ReplicaPatcher"
+        resource: "uc2-kstreams-deployment.yaml"
+loadTypes:
+  - typeName: "NumSensors"
+    patchers:
+      - type: "EnvVarPatcher"
+        resource: "uc2-load-generator-deployment.yaml"
+        properties:
+          container: "workload-generator"
+          variableName: "NUM_SENSORS"
+      - type: "NumSensorsLoadGeneratorReplicaPatcher"
+        resource: "uc2-load-generator-deployment.yaml"
+        properties:
+          loadGenMaxRecords: "150000"
+kafkaConfig:
+  bootstrapServer: "theodolite-cp-kafka:9092"
+  topics:
+    - name: "input"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "output"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "theodolite-.*"
+      removeOnly: true
diff --git a/theodolite-benchmarks/definitions/uc2-load-generator/resources/uc2-load-generator-deployment.yaml b/theodolite-benchmarks/definitions/uc2-load-generator/resources/uc2-load-generator-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dfc0af71543c15b12b5c850919feb0e0a4f52f28
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-load-generator/resources/uc2-load-generator-deployment.yaml
@@ -0,0 +1,32 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-load-generator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: workload-generator
+          image: ghcr.io/cau-se/theodolite-uc2-workload-generator:latest
+          ports:
+            - containerPort: 5701
+              name: coordination
+          env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: KUBERNETES_DNS_NAME
+              value: "titan-ccp-load-generator.$(KUBERNETES_NAMESPACE).svc.cluster.local"
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
diff --git a/theodolite-benchmarks/definitions/uc2-load-generator/resources/uc2-load-generator-service.yaml b/theodolite-benchmarks/definitions/uc2-load-generator/resources/uc2-load-generator-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f8b26b3f6dece427f9c1ad4db94e351b042749b3
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-load-generator/resources/uc2-load-generator-service.yaml
@@ -0,0 +1,18 @@
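+# Headless service (clusterIP: None): DNS resolves to the individual pod addresses,
+# which the load generator replicas use to discover each other.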
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-load-generator
+  labels:
+    app: titan-ccp-load-generator
+spec:
+  type: ClusterIP
+  clusterIP: None
+  selector:
+    app: titan-ccp-load-generator
+  ports:
+    - name: coordination
+      port: 5701
+      targetPort: 5701
+      protocol: TCP
diff --git a/theodolite-benchmarks/definitions/uc3-flink/resources/flink-configuration-configmap.yaml b/theodolite-benchmarks/definitions/uc3-flink/resources/flink-configuration-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..321541f6ac8715b8546b964d8ad2b7c28552fbcd
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/resources/flink-configuration-configmap.yaml
@@ -0,0 +1,66 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: flink-config
+  labels:
+    app: flink
+data:
+  flink-conf.yaml: |+
+    #jobmanager.rpc.address: flink-jobmanager
+    #taskmanager.numberOfTaskSlots: 1 #TODO
+    #blob.server.port: 6124
+    #jobmanager.rpc.port: 6123
+    #taskmanager.rpc.port: 6122
+    #queryable-state.proxy.ports: 6125
+    #jobmanager.memory.process.size: 4Gb
+    #taskmanager.memory.process.size: 4Gb
+    #parallelism.default: 1 #TODO
+    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
+    metrics.reporter.prom.interval: 10 SECONDS
+    taskmanager.network.detailed-metrics: true
+  # -> gives metrics about inbound/outbound network queue lengths
+  log4j-console.properties: |+
+    # This affects logging for both user code and Flink
+    rootLogger.level = INFO
+    rootLogger.appenderRef.console.ref = ConsoleAppender
+    rootLogger.appenderRef.rolling.ref = RollingFileAppender
+
+    # Uncomment this if you want to _only_ change Flink's logging
+    #logger.flink.name = org.apache.flink
+    #logger.flink.level = INFO
+
+    # The following lines keep the log level of common libraries/connectors on
+    # log level INFO. The root logger does not override this. You have to manually
+    # change the log levels here.
+    logger.akka.name = akka
+    logger.akka.level = INFO
+    logger.kafka.name = org.apache.kafka
+    logger.kafka.level = INFO
+    logger.hadoop.name = org.apache.hadoop
+    logger.hadoop.level = INFO
+    logger.zookeeper.name = org.apache.zookeeper
+    logger.zookeeper.level = INFO
+
+    # Log all infos to the console
+    appender.console.name = ConsoleAppender
+    appender.console.type = CONSOLE
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+
+    # Log all infos in the given rolling file
+    appender.rolling.name = RollingFileAppender
+    appender.rolling.type = RollingFile
+    appender.rolling.append = false
+    appender.rolling.fileName = ${sys:log.file}
+    appender.rolling.filePattern = ${sys:log.file}.%i
+    appender.rolling.layout.type = PatternLayout
+    appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+    appender.rolling.policies.type = Policies
+    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+    appender.rolling.policies.size.size = 100MB
+    appender.rolling.strategy.type = DefaultRolloverStrategy
+    appender.rolling.strategy.max = 10
+
+    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
+    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+    logger.netty.level = OFF
diff --git a/theodolite-benchmarks/definitions/uc3-flink/resources/jobmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc3-flink/resources/jobmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d01123b13fe2d63637ee4000051091a99bad0546
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/resources/jobmanager-deployment.yaml
@@ -0,0 +1,94 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-jobmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: jobmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: jobmanager
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: jobmanager
+          image: ghcr.io/cau-se/theodolite-uc3-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["standalone-job", "--job-classname", "theodolite.uc3.application.HistoryServiceFlinkJob"] # optional arguments: ["--job-id", "<job id>", "--fromSavepoint", "/path/to/savepoint", "--allowNonRestoredState"]
+          #command: ['sleep', '60m']
+          ports:
+            - containerPort: 6123
+              name: rpc
+            - containerPort: 6124
+              name: blob-server
+            - containerPort: 8081
+              name: webui
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6123
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf
+#            - name: job-artifacts-volume
+#              mountPath: /opt/flink/usrlib
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-jobmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
+#        - name: job-artifacts-volume
+#          hostPath:
+#            path: /host/path/to/job/artifacts
diff --git a/theodolite-benchmarks/definitions/uc3-flink/resources/jobmanager-rest-service.yaml b/theodolite-benchmarks/definitions/uc3-flink/resources/jobmanager-rest-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d74aaf7f625c6922e2e1b4f20c19e50a39b68ac
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/resources/jobmanager-rest-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager-rest
+spec:
+  type: NodePort
+  ports:
+    - name: rest
+      port: 8081
+      targetPort: 8081
+      nodePort: 30081
+  selector:
+    app: flink
+    component: jobmanager
diff --git a/theodolite-benchmarks/definitions/uc3-flink/resources/jobmanager-service.yaml b/theodolite-benchmarks/definitions/uc3-flink/resources/jobmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e2ff5d9898eb1ebf5db9a827472a47514ab1473c
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/resources/jobmanager-service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: rpc
+      port: 6123
+    - name: blob-server
+      port: 6124
+    - name: webui
+      port: 8081
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: jobmanager
diff --git a/theodolite-benchmarks/definitions/uc3-flink/resources/service-monitor.yaml b/theodolite-benchmarks/definitions/uc3-flink/resources/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..02f78823c627e27ddfe1db5eac3f6a7f7a7f1bf8
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/resources/service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: flink
+    appScope: titan-ccp
+  name: flink
+spec:
+  selector:
+    matchLabels:
+      app: flink
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-benchmarks/definitions/uc3-flink/resources/taskmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc3-flink/resources/taskmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..495f97817e43d692c30fe898c4ef3118cae682d7
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/resources/taskmanager-deployment.yaml
@@ -0,0 +1,88 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-taskmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: taskmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: taskmanager
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: taskmanager
+          image: ghcr.io/cau-se/theodolite-uc3-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: TASK_MANAGER_NUMBER_OF_TASK_SLOTS
+              value: "1" #TODO
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["taskmanager"]
+          ports:
+            - containerPort: 6122
+              name: rpc
+            - containerPort: 6125
+              name: query-state
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6122
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf/
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-taskmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
diff --git a/theodolite-benchmarks/definitions/uc3-flink/resources/taskmanager-service.yaml b/theodolite-benchmarks/definitions/uc3-flink/resources/taskmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a2e27f64af1cfd1a26da142b8a50bb41c8ba5fcb
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/resources/taskmanager-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-taskmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: taskmanager
diff --git a/theodolite-benchmarks/definitions/uc3-flink/uc3-flink-benchmark-operator.yaml b/theodolite-benchmarks/definitions/uc3-flink/uc3-flink-benchmark-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..47b64d9890fc0f300ee1bd8e67acbdf7c8c4e4f9
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/uc3-flink-benchmark-operator.yaml
@@ -0,0 +1,62 @@
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: uc3-flink
+spec:
+  sut:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc3-flink"
+          files:
+            - "flink-configuration-configmap.yaml"
+            - "taskmanager-deployment.yaml"
+            - "taskmanager-service.yaml"
+            - "service-monitor.yaml"
+            - "jobmanager-service.yaml"
+            - "jobmanager-deployment.yaml"
+            #- "jobmanager-rest-service.yaml"
+  loadGenerator:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc3-load-generator"
+          files:
+            - "uc3-load-generator-deployment.yaml"
+            - "uc3-load-generator-service.yaml"
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "taskmanager-deployment.yaml"
+        - type: "EnvVarPatcher"
+          resource: "jobmanager-deployment.yaml"
+          properties:
+            container: "jobmanager"
+            variableName: "PARALLELISM"
+        - type: "EnvVarPatcher" # required?
+          resource: "taskmanager-deployment.yaml"
+          properties:
+            container: "taskmanager"
+            variableName: "PARALLELISM"
+  loadTypes:
+    - typeName: "NumSensors"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc3-load-generator-deployment.yaml"
+          properties:
+            container: "workload-generator"
+            variableName: "NUM_SENSORS"
+        - type: "NumSensorsLoadGeneratorReplicaPatcher"
+          resource: "uc3-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "150000"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "output"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "theodolite-.*"
+        removeOnly: true
diff --git a/theodolite-benchmarks/definitions/uc3-kstreams/resources/uc3-jmx-configmap.yaml b/theodolite-benchmarks/definitions/uc3-kstreams/resources/uc3-jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..78496a86b1242a89b9e844ead3e700fd0b9a9667
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-kstreams/resources/uc3-jmx-configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aggregation-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false
diff --git a/theodolite-benchmarks/definitions/uc3-kstreams/resources/uc3-kstreams-deployment.yaml b/theodolite-benchmarks/definitions/uc3-kstreams/resources/uc3-kstreams-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e3f63fae9e245e6116e0fe451480d9bc74b36433
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-kstreams/resources/uc3-kstreams-deployment.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc3-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
diff --git a/theodolite-benchmarks/definitions/uc3-kstreams/resources/uc3-kstreams-service.yaml b/theodolite-benchmarks/definitions/uc3-kstreams/resources/uc3-kstreams-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85432d04f225c30469f3232153ef6bd72bd02bdf
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-kstreams/resources/uc3-kstreams-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-aggregation
+  labels:
+    app: titan-ccp-aggregation
+spec:
+  #type: NodePort
+  selector:
+    app: titan-ccp-aggregation
+  ports:
+  - name: http
+    port: 80
+    targetPort: 80
+    protocol: TCP
+  - name: metrics
+    port: 5556
diff --git a/theodolite-benchmarks/definitions/uc3-kstreams/resources/uc3-service-monitor.yaml b/theodolite-benchmarks/definitions/uc3-kstreams/resources/uc3-service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e7e758cacb5086305efa26292ddef2afc958096
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-kstreams/resources/uc3-service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: titan-ccp-aggregation
+    appScope: titan-ccp
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-benchmarks/definitions/uc3-kstreams/uc3-kstreams-benchmark-operator.yaml b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-kstreams-benchmark-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..25374ad92a32782857cea5924ea6482060832eac
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-kstreams-benchmark-operator.yaml
@@ -0,0 +1,49 @@
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: uc3-kstreams
+spec:
+  sut:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc3-kstreams"
+          files:
+          - "uc3-kstreams-deployment.yaml"
+          - "uc3-kstreams-service.yaml"
+          - "uc3-jmx-configmap.yaml"
+          - "uc3-service-monitor.yaml"
+  loadGenerator:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc3-load-generator"
+          files:
+            - "uc3-load-generator-deployment.yaml"
+            - "uc3-load-generator-service.yaml"
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "uc3-kstreams-deployment.yaml"
+  loadTypes:
+    - typeName: "NumSensors"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc3-load-generator-deployment.yaml"
+          properties:
+            container: "workload-generator"
+            variableName: "NUM_SENSORS"
+        - type: "NumSensorsLoadGeneratorReplicaPatcher"
+          resource: "uc3-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "150000"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "output"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "theodolite-.*"
+        removeOnly: true
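The two `NumSensors` patchers above cooperate: `EnvVarPatcher` writes the requested load into the `NUM_SENSORS` variable of the `workload-generator` container, while `NumSensorsLoadGeneratorReplicaPatcher` scales the number of generator pods so that no single instance has to emit more than `loadGenMaxRecords` (150,000) records. A minimal Java sketch of the assumed replica computation (the method name is hypothetical):

```java
// Hypothetical sketch of the computation implied by
// NumSensorsLoadGeneratorReplicaPatcher: one generator instance
// per started block of loadGenMaxRecords sensors.
static int loadGenReplicas(final int numSensors, final int loadGenMaxRecords) {
  // e.g. 300000 sensors / 150000 max records per instance -> 2 replicas
  return (int) Math.ceil((double) numSensors / loadGenMaxRecords);
}
```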
diff --git a/theodolite-benchmarks/definitions/uc3-kstreams/uc3-kstreams-benchmark-standalone.yaml b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-kstreams-benchmark-standalone.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aa92913d2c992835078174747ea849ce296c3eb1
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-kstreams-benchmark-standalone.yaml
@@ -0,0 +1,45 @@
+name: "uc3-kstreams"
+sut:
+  resources:
+    - configMap:
+        name: "benchmark-resources-uc3-kstreams"
+        files:
+        - "uc3-kstreams-deployment.yaml"
+        - "uc3-kstreams-service.yaml"
+        - "uc3-jmx-configmap.yaml"
+        - "uc3-service-monitor.yaml"
+loadGenerator:
+  resources:
+    - configMap:
+        name: "benchmark-resources-uc3-load-generator"
+        files:
+          - "uc3-load-generator-deployment.yaml"
+          - "uc3-load-generator-service.yaml"
+resourceTypes:
+  - typeName: "Instances"
+    patchers:
+      - type: "ReplicaPatcher"
+        resource: "uc3-kstreams-deployment.yaml"
+loadTypes:
+  - typeName: "NumSensors"
+    patchers:
+      - type: "EnvVarPatcher"
+        resource: "uc3-load-generator-deployment.yaml"
+        properties:
+          container: "workload-generator"
+          variableName: "NUM_SENSORS"
+      - type: "NumSensorsLoadGeneratorReplicaPatcher"
+        resource: "uc3-load-generator-deployment.yaml"
+        properties:
+          loadGenMaxRecords: "150000"
+kafkaConfig:
+  bootstrapServer: "theodolite-cp-kafka:9092"
+  topics:
+    - name: "input"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "output"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "theodolite-.*"
+      removeOnly: true
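This standalone file carries the same benchmark definition as the operator variant above, minus the `apiVersion`/`kind`/`metadata` wrapper and the `spec:` nesting; it is presumably meant for running Theodolite in standalone mode, where benchmarks are read from plain YAML files instead of custom resources.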
diff --git a/theodolite-benchmarks/definitions/uc3-load-generator/resources/uc3-load-generator-deployment.yaml b/theodolite-benchmarks/definitions/uc3-load-generator/resources/uc3-load-generator-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c1cad0b70fd82a5bbb43792ee79f9cf5cc71d95f
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-load-generator/resources/uc3-load-generator-deployment.yaml
@@ -0,0 +1,32 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-load-generator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: workload-generator
+          image: ghcr.io/cau-se/theodolite-uc3-workload-generator:latest
+          ports:
+            - containerPort: 5701
+              name: coordination
+          env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: KUBERNETES_DNS_NAME
+              value: "titan-ccp-load-generator.$(KUBERNETES_NAMESPACE).svc.cluster.local"
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
diff --git a/theodolite-benchmarks/definitions/uc3-load-generator/resources/uc3-load-generator-service.yaml b/theodolite-benchmarks/definitions/uc3-load-generator/resources/uc3-load-generator-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f8b26b3f6dece427f9c1ad4db94e351b042749b3
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-load-generator/resources/uc3-load-generator-service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-load-generator
+  labels:
+    app: titan-ccp-load-generator
+spec:
+  type: ClusterIP
+  clusterIP: None
+  selector:
+    app: titan-ccp-load-generator
+  ports:
+    - name: coordination
+      port: 5701
+      targetPort: 5701
+      protocol: TCP
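The load generator Service is headless (`clusterIP: None`), so the DNS name that `KUBERNETES_DNS_NAME` points at in the Deployment above resolves directly to the pod IPs. The generator instances presumably use this for peer discovery on the `coordination` port 5701, the default Hazelcast port.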
diff --git a/theodolite-benchmarks/definitions/uc4-flink/resources/flink-configuration-configmap.yaml b/theodolite-benchmarks/definitions/uc4-flink/resources/flink-configuration-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..321541f6ac8715b8546b964d8ad2b7c28552fbcd
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/resources/flink-configuration-configmap.yaml
@@ -0,0 +1,66 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: flink-config
+  labels:
+    app: flink
+data:
+  flink-conf.yaml: |+
+    #jobmanager.rpc.address: flink-jobmanager
+    #taskmanager.numberOfTaskSlots: 1 #TODO
+    #blob.server.port: 6124
+    #jobmanager.rpc.port: 6123
+    #taskmanager.rpc.port: 6122
+    #queryable-state.proxy.ports: 6125
+    #jobmanager.memory.process.size: 4Gb
+    #taskmanager.memory.process.size: 4Gb
+    #parallelism.default: 1 #TODO
+    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
+    metrics.reporter.prom.interval: 10 SECONDS
+    taskmanager.network.detailed-metrics: true
+  # -> gives metrics about inbound/outbound network queue lengths
+  log4j-console.properties: |+
+    # This affects logging for both user code and Flink
+    rootLogger.level = INFO
+    rootLogger.appenderRef.console.ref = ConsoleAppender
+    rootLogger.appenderRef.rolling.ref = RollingFileAppender
+
+    # Uncomment this if you want to _only_ change Flink's logging
+    #logger.flink.name = org.apache.flink
+    #logger.flink.level = INFO
+
+    # The following lines keep the log level of common libraries/connectors on
+    # log level INFO. The root logger does not override this. You have to manually
+    # change the log levels here.
+    logger.akka.name = akka
+    logger.akka.level = INFO
+    logger.kafka.name= org.apache.kafka
+    logger.kafka.level = INFO
+    logger.hadoop.name = org.apache.hadoop
+    logger.hadoop.level = INFO
+    logger.zookeeper.name = org.apache.zookeeper
+    logger.zookeeper.level = INFO
+
+    # Log all infos to the console
+    appender.console.name = ConsoleAppender
+    appender.console.type = CONSOLE
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+
+    # Log all infos in the given rolling file
+    appender.rolling.name = RollingFileAppender
+    appender.rolling.type = RollingFile
+    appender.rolling.append = false
+    appender.rolling.fileName = ${sys:log.file}
+    appender.rolling.filePattern = ${sys:log.file}.%i
+    appender.rolling.layout.type = PatternLayout
+    appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+    appender.rolling.policies.type = Policies
+    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+    appender.rolling.policies.size.size=100MB
+    appender.rolling.strategy.type = DefaultRolloverStrategy
+    appender.rolling.strategy.max = 10
+
+    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
+    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+    logger.netty.level = OFF
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc4-flink/resources/jobmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc4-flink/resources/jobmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..032499ea498f8155fd80e42ec4cbdd850498b217
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/resources/jobmanager-deployment.yaml
@@ -0,0 +1,94 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-jobmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: jobmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: jobmanager
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: jobmanager
+          image: ghcr.io/cau-se/theodolite-uc4-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["standalone-job", "--job-classname", "theodolite.uc4.application.AggregationServiceFlinkJob"] # optional arguments: ["--job-id", "<job id>", "--fromSavepoint", "/path/to/savepoint", "--allowNonRestoredState"]
+          #command: ['sleep', '60m']
+          ports:
+            - containerPort: 6123
+              name: rpc
+            - containerPort: 6124
+              name: blob-server
+            - containerPort: 8081
+              name: webui
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6123
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf
+#            - name: job-artifacts-volume
+#              mountPath: /opt/flink/usrlib
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-jobmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
+#        - name: job-artifacts-volume
+#          hostPath:
+#            path: /host/path/to/job/artifacts
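A note on the pattern above: ConfigMap volumes are mounted read-only, but Flink's container entrypoint rewrites `flink-conf.yaml` (e.g. to merge in `FLINK_PROPERTIES`), so the `init-jobmanager` container first copies the mounted configuration into a writable `emptyDir` volume, which is then mounted at `/opt/flink/conf`. The taskmanager Deployment below repeats the same trick.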
diff --git a/theodolite-benchmarks/definitions/uc4-flink/resources/jobmanager-rest-service.yaml b/theodolite-benchmarks/definitions/uc4-flink/resources/jobmanager-rest-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d74aaf7f625c6922e2e1b4f20c19e50a39b68ac
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/resources/jobmanager-rest-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager-rest
+spec:
+  type: NodePort
+  ports:
+    - name: rest
+      port: 8081
+      targetPort: 8081
+      nodePort: 30081
+  selector:
+    app: flink
+    component: jobmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc4-flink/resources/jobmanager-service.yaml b/theodolite-benchmarks/definitions/uc4-flink/resources/jobmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e2ff5d9898eb1ebf5db9a827472a47514ab1473c
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/resources/jobmanager-service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: rpc
+      port: 6123
+    - name: blob-server
+      port: 6124
+    - name: webui
+      port: 8081
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: jobmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc4-flink/resources/service-monitor.yaml b/theodolite-benchmarks/definitions/uc4-flink/resources/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..02f78823c627e27ddfe1db5eac3f6a7f7a7f1bf8
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/resources/service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: flink
+    appScope: titan-ccp
+  name: flink
+spec:
+  selector:
+    matchLabels:
+        app: flink
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-benchmarks/definitions/uc4-flink/resources/taskmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc4-flink/resources/taskmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7af13f20b6b2edf3c8878adf4f381dc1c1add115
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/resources/taskmanager-deployment.yaml
@@ -0,0 +1,88 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-taskmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: taskmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: taskmanager
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: taskmanager
+          image: ghcr.io/cau-se/theodolite-uc4-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: TASK_MANAGER_NUMBER_OF_TASK_SLOTS
+              value: "1" #TODO
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["taskmanager"]
+          ports:
+            - containerPort: 6122
+              name: rpc
+            - containerPort: 6125
+              name: query-state
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6122
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf/
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-taskmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
diff --git a/theodolite-benchmarks/definitions/uc4-flink/resources/taskmanager-service.yaml b/theodolite-benchmarks/definitions/uc4-flink/resources/taskmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a2e27f64af1cfd1a26da142b8a50bb41c8ba5fcb
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/resources/taskmanager-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-taskmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: taskmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc4-flink/uc4-flink-benchmark-operator.yaml b/theodolite-benchmarks/definitions/uc4-flink/uc4-flink-benchmark-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8a73f5b0f87198def7b152ea52008e3d4a1aa4ee
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/uc4-flink-benchmark-operator.yaml
@@ -0,0 +1,69 @@
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: uc4-flink
+spec:
+  sut:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc4-flink"
+          files:
+            - "flink-configuration-configmap.yaml"
+            - "taskmanager-deployment.yaml"
+            - "taskmanager-service.yaml"
+            - "service-monitor.yaml"
+            - "jobmanager-service.yaml"
+            - "jobmanager-deployment.yaml"
+            #- "jobmanager-rest-service.yaml"
+  loadGenerator:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc4-load-generator"
+          files:
+            - "uc4-load-generator-deployment.yaml"
+            - "uc4-load-generator-service.yaml"
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "taskmanager-deployment.yaml"
+        - type: "EnvVarPatcher"
+          resource: "jobmanager-deployment.yaml"
+          properties:
+            container: "jobmanager"
+            variableName: "PARALLELISM"
+        - type: "EnvVarPatcher" # required?
+          resource: "taskmanager-deployment.yaml"
+          properties:
+            container: "taskmanager"
+            variableName: "PARALLELISM"
+  loadTypes:
+    - typeName: "NumNestedGroups"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc4-load-generator-deployment.yaml"
+          properties:
+            container: "workload-generator"
+            variableName: "NUM_NESTED_GROUPS"
+        - type: "NumNestedGroupsLoadGeneratorReplicaPatcher"
+          resource: "uc4-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "150000"
+            numSensors: "4.0"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "output"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "configuration"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "aggregation-feedback"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "theodolite-.*"
+        removeOnly: true
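Unlike uc3, the uc4 load type scales the depth of the sensor hierarchy: with `numSensors: "4.0"` children per group, the total number of sensors grows exponentially in `NUM_NESTED_GROUPS`, so the replica patcher presumably derives the generator count from that power. A hedged Java sketch of the assumed computation:

```java
// Hypothetical sketch for NumNestedGroupsLoadGeneratorReplicaPatcher:
// total sensors = numSensors^nestedGroups, split across generator
// instances handling at most loadGenMaxRecords sensors each.
static int loadGenReplicas(final double numSensors, final int nestedGroups,
    final int loadGenMaxRecords) {
  // e.g. 4.0^9 = 262144 sensors / 150000 -> 2 replicas
  return (int) Math.ceil(Math.pow(numSensors, nestedGroups) / loadGenMaxRecords);
}
```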
diff --git a/theodolite-benchmarks/definitions/uc4-kstreams/resources/uc4-jmx-configmap.yaml b/theodolite-benchmarks/definitions/uc4-kstreams/resources/uc4-jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..78496a86b1242a89b9e844ead3e700fd0b9a9667
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-kstreams/resources/uc4-jmx-configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aggregation-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false
diff --git a/theodolite-benchmarks/definitions/uc4-kstreams/resources/uc4-kstreams-deployment.yaml b/theodolite-benchmarks/definitions/uc4-kstreams/resources/uc4-kstreams-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..20e0872d262df46b5c213d9d529983f5f4155735
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-kstreams/resources/uc4-kstreams-deployment.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc4-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc4-kstreams/resources/uc4-kstreams-service.yaml b/theodolite-benchmarks/definitions/uc4-kstreams/resources/uc4-kstreams-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85432d04f225c30469f3232153ef6bd72bd02bdf
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-kstreams/resources/uc4-kstreams-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-aggregation
+  labels:
+    app: titan-ccp-aggregation
+spec:
+  #type: NodePort
+  selector:
+    app: titan-ccp-aggregation
+  ports:
+  - name: http
+    port: 80
+    targetPort: 80
+    protocol: TCP
+  - name: metrics
+    port: 5556
diff --git a/theodolite-benchmarks/definitions/uc4-kstreams/resources/uc4-service-monitor.yaml b/theodolite-benchmarks/definitions/uc4-kstreams/resources/uc4-service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e7e758cacb5086305efa26292ddef2afc958096
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-kstreams/resources/uc4-service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: titan-ccp-aggregation
+    appScope: titan-ccp
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+        app: titan-ccp-aggregation
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-benchmarks/definitions/uc4-kstreams/uc4-kstreams-benchmark-operator.yaml b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-kstreams-benchmark-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..655db2fd4122c9e0e844eed3bfe7c0a878c6d7ec
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-kstreams-benchmark-operator.yaml
@@ -0,0 +1,56 @@
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: uc4-kstreams
+spec:
+  sut:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc4-kstreams"
+          files:
+            - "uc4-kstreams-deployment.yaml"
+            - "uc4-kstreams-service.yaml"
+            - "uc4-jmx-configmap.yaml"
+            - "uc4-service-monitor.yaml"
+  loadGenerator:
+    resources:
+      - configMap:
+          name: "benchmark-resources-uc4-load-generator"
+          files:
+            - "uc4-load-generator-deployment.yaml"
+            - "uc4-load-generator-service.yaml"
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "uc4-kstreams-deployment.yaml"
+  loadTypes:
+    - typeName: "NumNestedGroups"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc4-load-generator-deployment.yaml"
+          properties:
+            container: "workload-generator"
+            variableName: "NUM_NESTED_GROUPS"
+        - type: "NumNestedGroupsLoadGeneratorReplicaPatcher"
+          resource: "uc4-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "150000"
+            numSensors: "4.0"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "output"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "configuration"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "aggregation-feedback"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "theodolite-.*"
+        removeOnly: true
diff --git a/theodolite-benchmarks/definitions/uc4-kstreams/uc4-kstreams-benchmark-standalone.yaml b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-kstreams-benchmark-standalone.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5c50b6f95d796941c0b2830549ef825f4a4ff6fb
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-kstreams-benchmark-standalone.yaml
@@ -0,0 +1,52 @@
+name: "uc4-kstreams"
+sut:
+  resources:
+    - configMap:
+        name: "benchmark-resources-uc4-kstreams"
+        files:
+          - "uc4-kstreams-deployment.yaml"
+          - "uc4-kstreams-service.yaml"
+          - "uc4-jmx-configmap.yaml"
+          - "uc4-service-monitor.yaml"
+loadGenerator:
+  resources:
+    - configMap:
+        name: "benchmark-resources-uc4-load-generator"
+        files:
+          - "uc4-load-generator-deployment.yaml"
+          - "uc4-load-generator-service.yaml"
+resourceTypes:
+  - typeName: "Instances"
+    patchers:
+      - type: "ReplicaPatcher"
+        resource: "uc4-kstreams-deployment.yaml"
+loadTypes:
+  - typeName: "NumNestedGroups"
+    patchers:
+      - type: "EnvVarPatcher"
+        resource: "uc4-load-generator-deployment.yaml"
+        properties:
+          container: "workload-generator"
+          variableName: "NUM_NESTED_GROUPS"
+      - type: "NumNestedGroupsLoadGeneratorReplicaPatcher"
+        resource: "uc4-load-generator-deployment.yaml"
+        properties:
+          loadGenMaxRecords: "150000"
+          numSensors: "4.0"
+kafkaConfig:
+  bootstrapServer: "theodolite-cp-kafka:9092"
+  topics:
+    - name: "input"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "output"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "configuration"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "aggregation-feedback"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "theodolite-.*"
+      removeOnly: true
diff --git a/theodolite-benchmarks/definitions/uc4-load-generator/resources/uc4-load-generator-deployment.yaml b/theodolite-benchmarks/definitions/uc4-load-generator/resources/uc4-load-generator-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7a69d13daae57b06c77f316da9aa953b21ac096b
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-load-generator/resources/uc4-load-generator-deployment.yaml
@@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-load-generator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: workload-generator
+          image: ghcr.io/cau-se/theodolite-uc4-workload-generator:latest
+          ports:
+            - containerPort: 5701
+              name: coordination
+          env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: KUBERNETES_DNS_NAME
+              value: "titan-ccp-load-generator.$(KUBERNETES_NAMESPACE).svc.cluster.local"
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: NUM_NESTED_GROUPS
+              value: "5"
diff --git a/theodolite-benchmarks/definitions/uc4-load-generator/resources/uc4-load-generator-service.yaml b/theodolite-benchmarks/definitions/uc4-load-generator/resources/uc4-load-generator-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f8b26b3f6dece427f9c1ad4db94e351b042749b3
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-load-generator/resources/uc4-load-generator-service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-load-generator
+  labels:
+    app: titan-ccp-load-generator
+spec:
+  type: ClusterIP
+  clusterIP: None
+  selector:
+    app: titan-ccp-load-generator
+  ports:
+    - name: coordination
+      port: 5701
+      targetPort: 5701
+      protocol: TCP
diff --git a/theodolite-benchmarks/docker-test/uc1-flink-docker-compose/docker-compose.yml b/theodolite-benchmarks/docker-test/uc1-flink-docker-compose/docker-compose.yml
new file mode 100755
index 0000000000000000000000000000000000000000..2a0e6cda45fb81b5b20c658d9c51a4ced1ab2aae
--- /dev/null
+++ b/theodolite-benchmarks/docker-test/uc1-flink-docker-compose/docker-compose.yml
@@ -0,0 +1,69 @@
+version: '2'
+services:
+  zookeeper:
+    image: confluentinc/cp-zookeeper
+    expose:
+      - "2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
+  kafka:
+    image: wurstmeister/kafka
+    expose:
+      - "9092"
+    #ports:
+    #  - 19092:19092
+    environment:
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
+      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
+  schema-registry:
+    image: confluentinc/cp-schema-registry:5.3.1
+    depends_on:
+      - zookeeper
+      - kafka
+    expose:
+      - "8081"
+    #ports:
+    #  - 8081:8081
+    environment:
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry
+      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
+  load-generator:
+    image: ghcr.io/cau-se/theodolite-uc1-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+    environment:
+      BOOTSTRAP_SERVER: load-generator:5701
+      PORT: 5701
+      KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      NUM_SENSORS: 10
+  benchmark-jobmanager:
+    image: ghcr.io/cau-se/theodolite-uc1-flink:latest
+    ports:
+      - "8080:8081"
+    command: standalone-job --job-classname theodolite.uc1.application.HistoryServiceFlinkJob
+    environment:
+      - KAFKA_BOOTSTRAP_SERVERS=kafka:9092
+      - SCHEMA_REGISTRY_URL=http://schema-registry:8081
+      - |
+        FLINK_PROPERTIES=
+        jobmanager.rpc.address: benchmark-jobmanager
+        parallelism.default: 1
+    depends_on:
+      - schema-registry
+      - kafka
+  benchmark-taskmanager:
+    image: ghcr.io/cau-se/theodolite-uc1-flink:latest
+    command: taskmanager
+    environment:
+      - |
+        FLINK_PROPERTIES=
+        jobmanager.rpc.address: benchmark-jobmanager
+    depends_on:
+      - schema-registry
+      - kafka
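These compose files act as local smoke tests for the benchmark images. The multi-line `FLINK_PROPERTIES` variable is merged into `flink-conf.yaml` by the entrypoint of the official Flink base image (which these images are presumably built on), so the jobmanager address and default parallelism can be injected without mounting a config file; the jobmanager's web UI is published on host port 8080.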
diff --git a/docker-test/uc1-docker-compose/docker-compose.yml b/theodolite-benchmarks/docker-test/uc1-kstreams-docker-compose/docker-compose.yml
similarity index 89%
rename from docker-test/uc1-docker-compose/docker-compose.yml
rename to theodolite-benchmarks/docker-test/uc1-kstreams-docker-compose/docker-compose.yml
index cdc9df40257362934a93fcbe2de24b6035d40bca..36717ed16bd46fd530bba1b02b0e32a929fa1efc 100755
--- a/docker-test/uc1-docker-compose/docker-compose.yml
+++ b/theodolite-benchmarks/docker-test/uc1-kstreams-docker-compose/docker-compose.yml
@@ -3,7 +3,7 @@ services:
   zookeeper:
     image: confluentinc/cp-zookeeper
     expose:
-      - "9092"
+      - "2181"
     environment:
       ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
@@ -31,16 +31,16 @@ services:
     environment:
       SCHEMA_REGISTRY_HOST_NAME: schema-registry
       SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
-  uc-app:
-    image: theodolite/theodolite-uc1-kstreams-app:latest
+  benchmark:
+    image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
     depends_on:
       - schema-registry
       - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       SCHEMA_REGISTRY_URL: http://schema-registry:8081
-  uc-wg: 
-    image: theodolite/theodolite-uc1-workload-generator:latest
+  load-generator:
+    image: ghcr.io/cau-se/theodolite-uc1-workload-generator:latest
     depends_on:
       - schema-registry
       - kafka
diff --git a/theodolite-benchmarks/docker-test/uc2-flink-docker-compose/docker-compose.yml b/theodolite-benchmarks/docker-test/uc2-flink-docker-compose/docker-compose.yml
new file mode 100755
index 0000000000000000000000000000000000000000..9afe3650368aa3f53a4a9272c29216c0dbda1933
--- /dev/null
+++ b/theodolite-benchmarks/docker-test/uc2-flink-docker-compose/docker-compose.yml
@@ -0,0 +1,69 @@
+version: '2'
+services:
+  zookeeper:
+    image: confluentinc/cp-zookeeper
+    expose:
+      - "2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
+  kafka:
+    image: wurstmeister/kafka
+    expose:
+      - "9092"
+    #ports:
+    #  - 19092:19092
+    environment:
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
+      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
+  schema-registry:
+    image: confluentinc/cp-schema-registry:5.3.1
+    depends_on:
+      - zookeeper
+      - kafka
+    #ports:
+    #  - "8081:8081"
+    expose:
+      - "8081"
+    environment:
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry
+      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
+  load-generator:
+    image: ghcr.io/cau-se/theodolite-uc2-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+    environment:
+      BOOTSTRAP_SERVER: load-generator:5701
+      PORT: 5701
+      KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      NUM_SENSORS: 10
+  benchmark-jobmanager:
+    image: ghcr.io/cau-se/theodolite-uc2-flink:latest
+    ports:
+      - "8080:8081"
+    command: standalone-job --job-classname theodolite.uc2.application.HistoryServiceFlinkJob
+    environment:
+      - KAFKA_BOOTSTRAP_SERVERS=kafka:9092
+      - SCHEMA_REGISTRY_URL=http://schema-registry:8081
+      - |
+        FLINK_PROPERTIES=
+        jobmanager.rpc.address: benchmark-jobmanager
+        parallelism.default: 1
+    depends_on:
+      - schema-registry
+      - kafka
+  benchmark-taskmanager:
+    image: ghcr.io/cau-se/theodolite-uc2-flink:latest
+    command: taskmanager
+    environment:
+      - |
+        FLINK_PROPERTIES=
+        jobmanager.rpc.address: benchmark-jobmanager
+    depends_on:
+      - schema-registry
+      - kafka
diff --git a/docker-test/uc2-docker-compose/docker-compose.yml b/theodolite-benchmarks/docker-test/uc2-kstreams-docker-compose/docker-compose.yml
similarity index 87%
rename from docker-test/uc2-docker-compose/docker-compose.yml
rename to theodolite-benchmarks/docker-test/uc2-kstreams-docker-compose/docker-compose.yml
index 613553fcfa53122205b6e58d85fb7225eae90d7c..fc4748758cceb6948fc409704a6a9c69cf56649a 100755
--- a/docker-test/uc2-docker-compose/docker-compose.yml
+++ b/theodolite-benchmarks/docker-test/uc2-kstreams-docker-compose/docker-compose.yml
@@ -1,10 +1,9 @@
 version: '2'
 services:
   zookeeper:
-    #image: wurstmeister/zookeeper
     image: confluentinc/cp-zookeeper
-    ports:
-      - "2181:2181"
+    expose:
+      - "2181"
     environment:
       ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
@@ -32,8 +31,8 @@ services:
     environment:
       SCHEMA_REGISTRY_HOST_NAME: schema-registry
       SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
-  uc-app:
-    image: theodolite/theodolite-uc2-kstreams-app:latest
+  benchmark:
+    image: ghcr.io/cau-se/theodolite-uc2-kstreams-app:latest
     depends_on:
       - schema-registry
       - kafka
@@ -41,8 +40,8 @@ services:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       SCHEMA_REGISTRY_URL: http://schema-registry:8081
       KAFKA_WINDOW_DURATION_MINUTES: 60
-  uc-wg: 
-    image: theodolite/theodolite-uc2-workload-generator:latest
+  load-generator:
+    image: ghcr.io/cau-se/theodolite-uc2-workload-generator:latest
     depends_on:
       - schema-registry
       - kafka
diff --git a/theodolite-benchmarks/docker-test/uc3-flink-docker-compose/docker-compose.yml b/theodolite-benchmarks/docker-test/uc3-flink-docker-compose/docker-compose.yml
new file mode 100755
index 0000000000000000000000000000000000000000..17dd9220c73f98f7c45463ab6dc998d2bdcc359c
--- /dev/null
+++ b/theodolite-benchmarks/docker-test/uc3-flink-docker-compose/docker-compose.yml
@@ -0,0 +1,69 @@
+version: '2'
+services:
+  zookeeper:
+    image: confluentinc/cp-zookeeper
+    expose:
+      - "2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
+  kafka:
+    image: wurstmeister/kafka
+    expose:
+      - "9092"
+    #ports:
+    #  - 19092:19092
+    environment:
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
+      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
+  schema-registry:
+    image: confluentinc/cp-schema-registry:5.3.1
+    depends_on:
+      - zookeeper
+      - kafka
+    #ports:
+    #  - "8081:8081"
+    expose:
+      - "8081"
+    environment:
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry
+      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
+  load-generator:
+    image: ghcr.io/cau-se/theodolite-uc3-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+    environment:
+      BOOTSTRAP_SERVER: load-generator:5701
+      PORT: 5701
+      KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      NUM_SENSORS: 10
+  benchmark-jobmanager:
+    image: ghcr.io/cau-se/theodolite-uc3-flink:latest
+    ports:
+      - "8080:8081"
+    command: standalone-job --job-classname theodolite.uc3.application.HistoryServiceFlinkJob
+    environment:
+      - KAFKA_BOOTSTRAP_SERVERS=kafka:9092
+      - SCHEMA_REGISTRY_URL=http://schema-registry:8081
+      - |
+        FLINK_PROPERTIES=
+        jobmanager.rpc.address: benchmark-jobmanager
+        parallelism.default: 1
+    depends_on:
+      - schema-registry
+      - kafka
+  benchmark-taskmanager:
+    image: ghcr.io/cau-se/theodolite-uc3-flink:latest
+    command: taskmanager
+    environment:
+      - |
+        FLINK_PROPERTIES=
+        jobmanager.rpc.address: benchmark-jobmanager
+    depends_on:
+      - schema-registry
+      - kafka
diff --git a/docker-test/uc3-docker-compose/docker-compose.yml b/theodolite-benchmarks/docker-test/uc3-kstreams-docker-compose/docker-compose.yml
similarity index 87%
rename from docker-test/uc3-docker-compose/docker-compose.yml
rename to theodolite-benchmarks/docker-test/uc3-kstreams-docker-compose/docker-compose.yml
index d321318b4024b678cf8f37007e90dc62a2042ece..1e5c22a59a6755ae975c3a760615be311cb4329f 100755
--- a/docker-test/uc3-docker-compose/docker-compose.yml
+++ b/theodolite-benchmarks/docker-test/uc3-kstreams-docker-compose/docker-compose.yml
@@ -1,10 +1,9 @@
 version: '2'
 services:
   zookeeper:
-    #image: wurstmeister/zookeeper
     image: confluentinc/cp-zookeeper
-    ports:
-      - "2181:2181"
+    expose:
+      - "2181"
     environment:
       ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
@@ -32,16 +31,16 @@ services:
     environment:
       SCHEMA_REGISTRY_HOST_NAME: schema-registry
       SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
-  uc-app:
-    image: theodolite/theodolite-uc3-kstreams-app:latest
+  benchmark:
+    image: ghcr.io/cau-se/theodolite-uc3-kstreams-app:latest
     depends_on:
       - schema-registry
       - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       SCHEMA_REGISTRY_URL: http://schema-registry:8081
-  uc-wg: 
-    image: theodolite/theodolite-uc3-workload-generator:latest
+  load-generator:
+    image: ghcr.io/cau-se/theodolite-uc3-workload-generator:latest
     depends_on:
       - schema-registry
       - kafka
diff --git a/theodolite-benchmarks/docker-test/uc4-flink-docker-compose/docker-compose.yml b/theodolite-benchmarks/docker-test/uc4-flink-docker-compose/docker-compose.yml
new file mode 100755
index 0000000000000000000000000000000000000000..80720063991100bae2c8c148f14cd6f1a32bb0ff
--- /dev/null
+++ b/theodolite-benchmarks/docker-test/uc4-flink-docker-compose/docker-compose.yml
@@ -0,0 +1,70 @@
+version: '2'
+services:
+  zookeeper:
+    image: confluentinc/cp-zookeeper
+    expose:
+      - "2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
+  kafka:
+    image: wurstmeister/kafka
+    expose:
+      - "9092"
+    #ports:
+    #  - 19092:19092
+    environment:
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
+      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
+  schema-registry:
+    image: confluentinc/cp-schema-registry:5.3.1
+    depends_on:
+      - zookeeper
+      - kafka
+    expose:
+      - "8081"
+    #ports:
+    #  - 8081:8081
+    environment:
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry
+      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
+  load-generator:
+    image: ghcr.io/cau-se/theodolite-uc4-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+    environment:
+      BOOTSTRAP_SERVER: load-generator:5701
+      PORT: 5701
+      KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      NUM_SENSORS: 4
+      NUM_NESTED_GROUPS: 4
+  benchmark-jobmanager:
+    image: ghcr.io/cau-se/theodolite-uc4-flink:latest
+    ports:
+      - "8080:8081"
+    command: standalone-job --job-classname theodolite.uc4.application.AggregationServiceFlinkJob
+    environment:
+      - KAFKA_BOOTSTRAP_SERVERS=kafka:9092
+      - SCHEMA_REGISTRY_URL=http://schema-registry:8081
+      - |
+        FLINK_PROPERTIES=
+        jobmanager.rpc.address: benchmark-jobmanager
+        parallelism.default: 1
+    depends_on:
+      - schema-registry
+      - kafka
+  benchmark-taskmanager:
+    image: ghcr.io/cau-se/theodolite-uc4-flink:latest
+    command: taskmanager
+    environment:
+      - |
+        FLINK_PROPERTIES=
+        jobmanager.rpc.address: benchmark-jobmanager
+    depends_on:
+      - schema-registry
+      - kafka
diff --git a/docker-test/uc4-docker-compose/docker-compose.yml b/theodolite-benchmarks/docker-test/uc4-kstreams-docker-compose/docker-compose.yml
similarity index 90%
rename from docker-test/uc4-docker-compose/docker-compose.yml
rename to theodolite-benchmarks/docker-test/uc4-kstreams-docker-compose/docker-compose.yml
index d478d74e55a1b5423a390c624848b20f5faf2969..5e4cb94469f2f6cc8c48694a7ea6c885f066622d 100755
--- a/docker-test/uc4-docker-compose/docker-compose.yml
+++ b/theodolite-benchmarks/docker-test/uc4-kstreams-docker-compose/docker-compose.yml
@@ -31,16 +31,16 @@ services:
     environment:
       SCHEMA_REGISTRY_HOST_NAME: schema-registry
       SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
-  uc-app:
-    image: theodolite/theodolite-uc4-kstreams-app:latest
+  benchmark:
+    image: ghcr.io/cau-se/theodolite-uc4-kstreams-app:latest
     depends_on:
       - schema-registry
       - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       SCHEMA_REGISTRY_URL: http://schema-registry:8081
-  uc-wg: 
-    image: theodolite/theodolite-uc4-workload-generator:latest
+  load-generator:
+    image: ghcr.io/cau-se/theodolite-uc4-workload-generator:latest
     depends_on:
       - schema-registry
       - kafka
diff --git a/theodolite-benchmarks/flink-commons/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/flink-commons/.settings/org.eclipse.jdt.ui.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..66b402b58f39b79066638ce679c27c0378d5be54
--- /dev/null
+++ b/theodolite-benchmarks/flink-commons/.settings/org.eclipse.jdt.ui.prefs
@@ -0,0 +1,128 @@
+cleanup.add_default_serial_version_id=true
+cleanup.add_generated_serial_version_id=false
+cleanup.add_missing_annotations=true
+cleanup.add_missing_deprecated_annotations=true
+cleanup.add_missing_methods=false
+cleanup.add_missing_nls_tags=false
+cleanup.add_missing_override_annotations=true
+cleanup.add_missing_override_annotations_interface_methods=true
+cleanup.add_serial_version_id=false
+cleanup.always_use_blocks=true
+cleanup.always_use_parentheses_in_expressions=false
+cleanup.always_use_this_for_non_static_field_access=true
+cleanup.always_use_this_for_non_static_method_access=true
+cleanup.convert_functional_interfaces=false
+cleanup.convert_to_enhanced_for_loop=true
+cleanup.correct_indentation=true
+cleanup.format_source_code=true
+cleanup.format_source_code_changes_only=false
+cleanup.insert_inferred_type_arguments=false
+cleanup.make_local_variable_final=true
+cleanup.make_parameters_final=true
+cleanup.make_private_fields_final=true
+cleanup.make_type_abstract_if_missing_method=false
+cleanup.make_variable_declarations_final=true
+cleanup.never_use_blocks=false
+cleanup.never_use_parentheses_in_expressions=true
+cleanup.organize_imports=true
+cleanup.qualify_static_field_accesses_with_declaring_class=false
+cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+cleanup.qualify_static_member_accesses_with_declaring_class=true
+cleanup.qualify_static_method_accesses_with_declaring_class=false
+cleanup.remove_private_constructors=true
+cleanup.remove_redundant_modifiers=false
+cleanup.remove_redundant_semicolons=true
+cleanup.remove_redundant_type_arguments=true
+cleanup.remove_trailing_whitespaces=true
+cleanup.remove_trailing_whitespaces_all=true
+cleanup.remove_trailing_whitespaces_ignore_empty=false
+cleanup.remove_unnecessary_casts=true
+cleanup.remove_unnecessary_nls_tags=true
+cleanup.remove_unused_imports=true
+cleanup.remove_unused_local_variables=false
+cleanup.remove_unused_private_fields=true
+cleanup.remove_unused_private_members=false
+cleanup.remove_unused_private_methods=true
+cleanup.remove_unused_private_types=true
+cleanup.sort_members=false
+cleanup.sort_members_all=false
+cleanup.use_anonymous_class_creation=false
+cleanup.use_blocks=true
+cleanup.use_blocks_only_for_return_and_throw=false
+cleanup.use_lambda=true
+cleanup.use_parentheses_in_expressions=true
+cleanup.use_this_for_non_static_field_access=true
+cleanup.use_this_for_non_static_field_access_only_if_necessary=false
+cleanup.use_this_for_non_static_method_access=true
+cleanup.use_this_for_non_static_method_access_only_if_necessary=false
+cleanup_profile=_CAU-SE-Style
+cleanup_settings_version=2
+eclipse.preferences.version=1
+editor_save_participant_org.eclipse.jdt.ui.postsavelistener.cleanup=true
+formatter_profile=_CAU-SE-Style
+formatter_settings_version=15
+org.eclipse.jdt.ui.ignorelowercasenames=true
+org.eclipse.jdt.ui.importorder=;
+org.eclipse.jdt.ui.ondemandthreshold=99
+org.eclipse.jdt.ui.staticondemandthreshold=99
+org.eclipse.jdt.ui.text.custom_code_templates=
+sp_cleanup.add_default_serial_version_id=true
+sp_cleanup.add_generated_serial_version_id=false
+sp_cleanup.add_missing_annotations=true
+sp_cleanup.add_missing_deprecated_annotations=true
+sp_cleanup.add_missing_methods=false
+sp_cleanup.add_missing_nls_tags=false
+sp_cleanup.add_missing_override_annotations=true
+sp_cleanup.add_missing_override_annotations_interface_methods=true
+sp_cleanup.add_serial_version_id=false
+sp_cleanup.always_use_blocks=true
+sp_cleanup.always_use_parentheses_in_expressions=false
+sp_cleanup.always_use_this_for_non_static_field_access=true
+sp_cleanup.always_use_this_for_non_static_method_access=true
+sp_cleanup.convert_functional_interfaces=false
+sp_cleanup.convert_to_enhanced_for_loop=true
+sp_cleanup.correct_indentation=true
+sp_cleanup.format_source_code=true
+sp_cleanup.format_source_code_changes_only=false
+sp_cleanup.insert_inferred_type_arguments=false
+sp_cleanup.make_local_variable_final=true
+sp_cleanup.make_parameters_final=true
+sp_cleanup.make_private_fields_final=true
+sp_cleanup.make_type_abstract_if_missing_method=false
+sp_cleanup.make_variable_declarations_final=true
+sp_cleanup.never_use_blocks=false
+sp_cleanup.never_use_parentheses_in_expressions=true
+sp_cleanup.on_save_use_additional_actions=true
+sp_cleanup.organize_imports=true
+sp_cleanup.qualify_static_field_accesses_with_declaring_class=false
+sp_cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_with_declaring_class=true
+sp_cleanup.qualify_static_method_accesses_with_declaring_class=false
+sp_cleanup.remove_private_constructors=true
+sp_cleanup.remove_redundant_modifiers=false
+sp_cleanup.remove_redundant_semicolons=false
+sp_cleanup.remove_redundant_type_arguments=true
+sp_cleanup.remove_trailing_whitespaces=true
+sp_cleanup.remove_trailing_whitespaces_all=true
+sp_cleanup.remove_trailing_whitespaces_ignore_empty=false
+sp_cleanup.remove_unnecessary_casts=true
+sp_cleanup.remove_unnecessary_nls_tags=true
+sp_cleanup.remove_unused_imports=true
+sp_cleanup.remove_unused_local_variables=false
+sp_cleanup.remove_unused_private_fields=true
+sp_cleanup.remove_unused_private_members=false
+sp_cleanup.remove_unused_private_methods=true
+sp_cleanup.remove_unused_private_types=true
+sp_cleanup.sort_members=false
+sp_cleanup.sort_members_all=false
+sp_cleanup.use_anonymous_class_creation=false
+sp_cleanup.use_blocks=true
+sp_cleanup.use_blocks_only_for_return_and_throw=false
+sp_cleanup.use_lambda=true
+sp_cleanup.use_parentheses_in_expressions=true
+sp_cleanup.use_this_for_non_static_field_access=true
+sp_cleanup.use_this_for_non_static_field_access_only_if_necessary=false
+sp_cleanup.use_this_for_non_static_method_access=true
+sp_cleanup.use_this_for_non_static_method_access_only_if_necessary=false
diff --git a/benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/flink-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to theodolite-benchmarks/flink-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/flink-commons/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.pmd.prefs
rename to theodolite-benchmarks/flink-commons/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/theodolite-benchmarks/flink-commons/build.gradle b/theodolite-benchmarks/flink-commons/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..0da7c6f93f4e77e1376f5f2d006ec0bf0f398ec8
--- /dev/null
+++ b/theodolite-benchmarks/flink-commons/build.gradle
@@ -0,0 +1,36 @@
+plugins {
+    id 'theodolite.java-commons'
+}
+
+ext {
+    flinkVersion = '1.12.0'
+    scalaBinaryVersion = '2.12'
+}
+
+repositories {
+  jcenter()
+  maven {
+    url "https://oss.sonatype.org/content/repositories/snapshots/"
+  }
+  maven {
+    url 'https://packages.confluent.io/maven/'
+  }
+}
+
+dependencies {
+    // Special version required because of https://issues.apache.org/jira/browse/FLINK-13703
+    implementation('org.industrial-devops:titan-ccp-common:0.1.0-flink-ready-SNAPSHOT') { changing = true }
+    implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
+    implementation 'com.google.guava:guava:30.1-jre'
+
+    implementation "org.apache.flink:flink-java:${flinkVersion}"    
+    implementation "org.apache.flink:flink-connector-kafka_${scalaBinaryVersion}:${flinkVersion}"
+    implementation "org.apache.flink:flink-avro:${flinkVersion}"
+    implementation "org.apache.flink:flink-avro-confluent-registry:${flinkVersion}"
+    implementation "org.apache.flink:flink-runtime-web_${scalaBinaryVersion}:${flinkVersion}" // For debugging
+    implementation "org.apache.flink:flink-statebackend-rocksdb_${scalaBinaryVersion}:${flinkVersion}"
+    implementation "org.apache.flink:flink-metrics-prometheus_${scalaBinaryVersion}:${flinkVersion}"
+
+    // Use JUnit test framework
+    testImplementation 'junit:junit:4.12'
+}
diff --git a/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/ConfigurationKeys.java b/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/ConfigurationKeys.java
new file mode 100644
index 0000000000000000000000000000000000000000..2847ede440ecd65bdf35fc8e825d0f7b723a3f8f
--- /dev/null
+++ b/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/ConfigurationKeys.java
@@ -0,0 +1,19 @@
+package theodolite.commons.flink;
+
+/**
+ * Keys to access configuration parameters.
+ */
+public final class ConfigurationKeys {
+
+  public static final String FLINK_STATE_BACKEND = "flink.state.backend";
+
+  public static final String FLINK_STATE_BACKEND_PATH = "flink.state.backend.path";
+
+  public static final String FLINK_STATE_BACKEND_MEMORY_SIZE = // NOPMD
+      "flink.state.backend.memory.size";
+
+  public static final String FLINK_CHECKPOINTING = "checkpointing";
+
+  private ConfigurationKeys() {}
+
+}
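
A minimal sketch of how these keys are intended to be read, assuming the same commons-configuration2 Configuration created via ServiceConfigurations that the rest of this module uses (the fallback values shown are illustrative, not authoritative defaults):

    // Hypothetical usage: read the Flink state backend settings.
    final Configuration config = ServiceConfigurations.createWithDefaults();
    final String backend =
        config.getString(ConfigurationKeys.FLINK_STATE_BACKEND, "memory");
    final boolean checkpointing =
        config.getBoolean(ConfigurationKeys.FLINK_CHECKPOINTING, true);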
diff --git a/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/KafkaConnectorFactory.java b/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/KafkaConnectorFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..55d73b0fb9274b0ae67468d50b7978799d7e6257
--- /dev/null
+++ b/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/KafkaConnectorFactory.java
@@ -0,0 +1,154 @@
+package theodolite.commons.flink;
+
+import java.time.Duration;
+import java.util.Properties;
+import org.apache.avro.specific.SpecificRecord;
+import org.apache.flink.api.common.eventtime.WatermarkStrategy;
+import org.apache.flink.api.common.serialization.DeserializationSchema;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.formats.avro.registry.confluent.ConfluentRegistryAvroDeserializationSchema;
+import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
+import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
+import org.apache.flink.streaming.connectors.kafka.KafkaDeserializationSchema;
+import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.common.serialization.Serde;
+import theodolite.commons.flink.serialization.FlinkKafkaKeyValueSerde;
+import theodolite.commons.flink.util.SerializableSupplier;
+
+/**
+ * A class for creating {@link FlinkKafkaConsumer} and {@link FlinkKafkaProducer}.
+ */
+public class KafkaConnectorFactory {
+
+  private static final Duration PRODUCER_TRANSACTION_TIMEOUT = Duration.ofMinutes(5);
+
+  private final Properties kafkaProps = new Properties();
+  private final boolean checkpointingEnabled;
+  private final String schemaRegistryUrl;
+
+  /**
+   * Create a new {@link KafkaConnectorFactory} from the provided parameters.
+   */
+  public KafkaConnectorFactory(
+      final String appName,
+      final String bootstrapServers,
+      final boolean checkpointingEnabled,
+      final String schemaRegistryUrl) {
+    this.checkpointingEnabled = checkpointingEnabled;
+    this.schemaRegistryUrl = schemaRegistryUrl;
+    this.kafkaProps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+    this.kafkaProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, appName);
+  }
+
+  /**
+   * Create a new {@link FlinkKafkaConsumer} that consumes data using a
+   * {@link DeserializationSchema}.
+   */
+  public <T> FlinkKafkaConsumer<T> createConsumer(final String topic,
+      final DeserializationSchema<T> deserializationSchema) {
+    return this.createBaseConsumer(
+        new FlinkKafkaConsumer<>(topic, deserializationSchema, this.cloneProperties()));
+  }
+
+  /**
+   * Create a new {@link FlinkKafkaConsumer} that consumes data using a
+   * {@link KafkaDeserializationSchema}.
+   */
+  public <T> FlinkKafkaConsumer<T> createConsumer(final String topic,
+      final KafkaDeserializationSchema<T> deserializationSchema) {
+    return this.createBaseConsumer(
+        new FlinkKafkaConsumer<>(topic, deserializationSchema, this.cloneProperties()));
+  }
+
+  /**
+   * Create a new {@link FlinkKafkaConsumer} that consumes {@link Tuple2}s using two Kafka
+   * {@link Serde}s.
+   */
+  public <K, V> FlinkKafkaConsumer<Tuple2<K, V>> createConsumer(
+      final String topic,
+      final SerializableSupplier<Serde<K>> kafkaKeySerde,
+      final SerializableSupplier<Serde<V>> kafkaValueSerde,
+      final TypeInformation<Tuple2<K, V>> typeInformation) {
+    return this.<Tuple2<K, V>>createConsumer(
+        topic,
+        new FlinkKafkaKeyValueSerde<>(
+            topic,
+            kafkaKeySerde,
+            kafkaValueSerde,
+            typeInformation));
+  }
+
+  /**
+   * Create a new {@link FlinkKafkaConsumer} that consumes Avro {@link SpecificRecord}s from a
+   * topic, using the Confluent Schema Registry for deserialization.
+   */
+  public <T extends SpecificRecord> FlinkKafkaConsumer<T> createConsumer(final String topic,
+      final Class<T> typeClass) {
+    // Maybe move to subclass for Confluent-Schema-Registry-specific things
+    final DeserializationSchema<T> deserializationSchema =
+        ConfluentRegistryAvroDeserializationSchema.forSpecific(typeClass, this.schemaRegistryUrl);
+    return this.createConsumer(topic, deserializationSchema);
+  }
+
+  private <T> FlinkKafkaConsumer<T> createBaseConsumer(final FlinkKafkaConsumer<T> baseConsumer) {
+    baseConsumer.setStartFromGroupOffsets();
+    if (this.checkpointingEnabled) {
+      baseConsumer.setCommitOffsetsOnCheckpoints(true); // TODO Validate if this is sensible
+    }
+    baseConsumer.assignTimestampsAndWatermarks(WatermarkStrategy.forMonotonousTimestamps());
+    return baseConsumer;
+  }
+
+
+  /**
+   * Create a new {@link FlinkKafkaProducer} that produces data using a
+   * {@link KafkaSerializationSchema}.
+   */
+  public <T> FlinkKafkaProducer<T> createProducer(final String topic,
+      final KafkaSerializationSchema<T> serializationSchema) {
+    final Properties producerProps = this.buildProducerProperties();
+    return this.createBaseProducer(new FlinkKafkaProducer<>(
+        topic, serializationSchema, producerProps, FlinkKafkaProducer.Semantic.AT_LEAST_ONCE));
+  }
+
+  /**
+   * Create a new {@link FlinkKafkaProducer} that produces {@link Tuple2}s using two Kafka
+   * {@link Serde}s.
+   */
+  public <K, V> FlinkKafkaProducer<Tuple2<K, V>> createProducer(
+      final String topic,
+      final SerializableSupplier<Serde<K>> kafkaKeySerde,
+      final SerializableSupplier<Serde<V>> kafkaValueSerde,
+      final TypeInformation<Tuple2<K, V>> typeInformation) {
+    return this.createProducer(
+        topic,
+        new FlinkKafkaKeyValueSerde<>(
+            topic,
+            kafkaKeySerde,
+            kafkaValueSerde,
+            typeInformation));
+  }
+
+  private <T> FlinkKafkaProducer<T> createBaseProducer(final FlinkKafkaProducer<T> baseProducer) {
+    baseProducer.setWriteTimestampToKafka(true);
+    return baseProducer;
+  }
+
+  private Properties buildProducerProperties() {
+    final Properties producerProps = this.cloneProperties();
+    producerProps.setProperty(
+        ProducerConfig.TRANSACTION_TIMEOUT_CONFIG,
+        String.valueOf(PRODUCER_TRANSACTION_TIMEOUT.toMillis())); // TODO necessary?
+    return producerProps;
+  }
+
+  private Properties cloneProperties() {
+    final Properties props = new Properties();
+    props.putAll(this.kafkaProps);
+    return props;
+  }
+
+}
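
A usage sketch for the factory above; the application name, bootstrap servers, topic, and Schema Registry URL are placeholder values, and ActivePowerRecord stands in for any Avro SpecificRecord type:

    // Hypothetical wiring: one factory per job, one connector per topic.
    final KafkaConnectorFactory factory = new KafkaConnectorFactory(
        "uc1-application-0.0.1", "localhost:9092", true, "http://localhost:8081");
    final FlinkKafkaConsumer<ActivePowerRecord> consumer =
        factory.createConsumer("input", ActivePowerRecord.class);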
diff --git a/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/StateBackends.java b/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/StateBackends.java
new file mode 100644
index 0000000000000000000000000000000000000000..a94927e4bf49e1dbe6d109eb8f19f7d292f3d879
--- /dev/null
+++ b/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/StateBackends.java
@@ -0,0 +1,68 @@
+package theodolite.commons.flink;
+
+import java.io.IOException;
+import org.apache.commons.configuration2.Configuration;
+import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
+import org.apache.flink.runtime.state.StateBackend;
+import org.apache.flink.runtime.state.filesystem.FsStateBackend;
+import org.apache.flink.runtime.state.memory.MemoryStateBackend;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Provides factory methods for creating Flink {@link StateBackend}s.
+ */
+public final class StateBackends {
+
+  public static final String STATE_BACKEND_TYPE_MEMORY = "memory";
+  public static final String STATE_BACKEND_TYPE_FILESYSTEM = "filesystem";
+  public static final String STATE_BACKEND_TYPE_ROCKSDB = "rocksdb";
+  // public static final String STATE_BACKEND_TYPE_DEFAULT = STATE_BACKEND_TYPE_ROCKSDB;
+  public static final String STATE_BACKEND_TYPE_DEFAULT = STATE_BACKEND_TYPE_MEMORY;
+  public static final String DEFAULT_STATE_BACKEND_PATH = "file:///opt/flink/statebackend";
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(StateBackends.class);
+
+  private StateBackends() {}
+
+  /**
+   * Create a Flink {@link StateBackend} from a {@link Configuration} and the
+   * {@code ConfigurationKeys#FLINK_STATE_BACKEND},
+   * {@code ConfigurationKeys#FLINK_STATE_BACKEND_MEMORY_SIZE} and
+   * {@code ConfigurationKeys#FLINK_STATE_BACKEND_PATH} configuration keys. Possible options for the
+   * {@code ConfigurationKeys#FLINK_STATE_BACKEND} configuration are
+   * {@code #STATE_BACKEND_TYPE_ROCKSDB}, {@code #STATE_BACKEND_TYPE_FILESYSTEM} and
+   * {@code #STATE_BACKEND_TYPE_MEMORY}, where
+   * {@code #STATE_BACKEND_TYPE_MEMORY} is the default.
+   */
+  public static StateBackend fromConfiguration(final Configuration configuration) {
+    final String stateBackendType =
+        configuration.getString(ConfigurationKeys.FLINK_STATE_BACKEND, STATE_BACKEND_TYPE_DEFAULT);
+    switch (stateBackendType) {
+      case STATE_BACKEND_TYPE_MEMORY:
+        final int memoryStateBackendSize = configuration.getInt(
+            ConfigurationKeys.FLINK_STATE_BACKEND_MEMORY_SIZE,
+            MemoryStateBackend.DEFAULT_MAX_STATE_SIZE);
+        return new MemoryStateBackend(memoryStateBackendSize);
+      case STATE_BACKEND_TYPE_FILESYSTEM:
+        final String stateBackendPath = configuration.getString(
+            ConfigurationKeys.FLINK_STATE_BACKEND_PATH,
+            DEFAULT_STATE_BACKEND_PATH);
+        return new FsStateBackend(stateBackendPath);
+      case STATE_BACKEND_TYPE_ROCKSDB:
+        final String stateBackendPath2 = configuration.getString(
+            ConfigurationKeys.FLINK_STATE_BACKEND_PATH,
+            DEFAULT_STATE_BACKEND_PATH);
+        try {
+          return new RocksDBStateBackend(stateBackendPath2, true);
+        } catch (final IOException e) {
+          LOGGER.error("Cannot create RocksDB state backend.", e);
+          throw new IllegalStateException(e);
+        }
+      default:
+        throw new IllegalArgumentException(
+            "Unsupported state backend '" + stateBackendType + "' configured.");
+    }
+  }
+
+}
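
The intended call site, sketched under the assumption that a StreamExecutionEnvironment env and a Configuration config are already in scope:

    // Pick the configured state backend; falls back to the in-memory backend.
    env.setStateBackend(StateBackends.fromConfiguration(config));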
diff --git a/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/TupleType.java b/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/TupleType.java
new file mode 100644
index 0000000000000000000000000000000000000000..360331e4d1e4fdc47a24ac8ae995b7590301f7fd
--- /dev/null
+++ b/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/TupleType.java
@@ -0,0 +1,22 @@
+package theodolite.commons.flink;
+
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.common.typeinfo.Types;
+import org.apache.flink.api.java.tuple.Tuple;
+import org.apache.flink.api.java.tuple.Tuple2;
+
+/**
+ * Helper methods for creating {@link TypeInformation} for {@link Tuple}s. In contrast to
+ * {@code Types#TUPLE(TypeInformation...)}, these methods provide compile-time type safety.
+ */
+public final class TupleType {
+
+  private TupleType() {}
+
+  public static <T1, T2> TypeInformation<Tuple2<T1, T2>> of(// NOPMD
+      final TypeInformation<T1> t0,
+      final TypeInformation<T2> t1) {
+    return Types.TUPLE(t0, t1);
+  }
+
+}
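
For illustration, the typed variant produces the same TypeInformation as the raw Types.TUPLE call it wraps, but the element types are checked by the compiler:

    // Compile-time checked: callers must expect Tuple2<String, Double>.
    final TypeInformation<Tuple2<String, Double>> typed =
        TupleType.of(Types.STRING, Types.DOUBLE);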
diff --git a/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/serialization/FlinkKafkaKeyValueSerde.java b/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/serialization/FlinkKafkaKeyValueSerde.java
new file mode 100644
index 0000000000000000000000000000000000000000..22f615a6af4caf575af57dbe9b7f989889c4095f
--- /dev/null
+++ b/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/serialization/FlinkKafkaKeyValueSerde.java
@@ -0,0 +1,80 @@
+package theodolite.commons.flink.serialization;
+
+import javax.annotation.Nullable;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.streaming.connectors.kafka.KafkaDeserializationSchema;
+import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.serialization.Serde;
+import theodolite.commons.flink.util.SerializableSupplier;
+
+/**
+ * A {@link KafkaSerializationSchema} and {@link KafkaDeserializationSchema} for an arbitrary
+ * key-value-pair in Kafka, mapped to/from a Flink {@link Tuple2}.
+ *
+ * @param <K> Type of the key.
+ * @param <V> Type of the value.
+ */
+public class FlinkKafkaKeyValueSerde<K, V>
+    implements KafkaDeserializationSchema<Tuple2<K, V>>, KafkaSerializationSchema<Tuple2<K, V>> {
+
+  private static final long serialVersionUID = 2469569396501933443L; // NOPMD
+
+  private final SerializableSupplier<Serde<K>> keySerdeSupplier;
+  private final SerializableSupplier<Serde<V>> valueSerdeSupplier;
+  private final String topic;
+  private final TypeInformation<Tuple2<K, V>> typeInfo;
+
+  private transient Serde<K> keySerde;
+  private transient Serde<V> valueSerde;
+
+  /**
+   * Create a new {@link FlinkKafkaKeyValueSerde}.
+   */
+  public FlinkKafkaKeyValueSerde(final String topic,
+      final SerializableSupplier<Serde<K>> keySerdeSupplier,
+      final SerializableSupplier<Serde<V>> valueSerdeSupplier,
+      final TypeInformation<Tuple2<K, V>> typeInfo) {
+    this.topic = topic;
+    this.typeInfo = typeInfo;
+    this.keySerdeSupplier = keySerdeSupplier;
+    this.valueSerdeSupplier = valueSerdeSupplier;
+  }
+
+  @Override
+  public boolean isEndOfStream(final Tuple2<K, V> nextElement) {
+    return false;
+  }
+
+  @Override
+  public Tuple2<K, V> deserialize(final ConsumerRecord<byte[], byte[]> record) {
+    this.ensureInitialized();
+    final K key = this.keySerde.deserializer().deserialize(this.topic, record.key());
+    final V value = this.valueSerde.deserializer().deserialize(this.topic, record.value());
+    return new Tuple2<>(key, value);
+  }
+
+  @Override
+  public TypeInformation<Tuple2<K, V>> getProducedType() {
+    return this.typeInfo;
+  }
+
+  @Override
+  public ProducerRecord<byte[], byte[]> serialize(final Tuple2<K, V> element,
+      @Nullable final Long timestamp) {
+    this.ensureInitialized();
+    final byte[] key = this.keySerde.serializer().serialize(this.topic, element.f0);
+    final byte[] value = this.valueSerde.serializer().serialize(this.topic, element.f1);
+    return new ProducerRecord<>(this.topic, key, value);
+  }
+
+  private void ensureInitialized() {
+    if (this.keySerde == null || this.valueSerde == null) {
+      this.keySerde = this.keySerdeSupplier.get();
+      this.valueSerde = this.valueSerdeSupplier.get();
+    }
+  }
+
+}
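
A construction sketch for a hypothetical (String, String) topic; Serdes::String refers to Kafka's built-in string serde, and the method references satisfy SerializableSupplier because that target type is serializable:

    // Hypothetical serde for string key-value pairs on topic "example-topic".
    final FlinkKafkaKeyValueSerde<String, String> serde = new FlinkKafkaKeyValueSerde<>(
        "example-topic",
        Serdes::String,
        Serdes::String,
        TupleType.of(Types.STRING, Types.STRING));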
diff --git a/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/serialization/StatsSerializer.java b/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/serialization/StatsSerializer.java
new file mode 100644
index 0000000000000000000000000000000000000000..f1f9870fda73ccec0fc25c5c70665759ab07d893
--- /dev/null
+++ b/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/serialization/StatsSerializer.java
@@ -0,0 +1,30 @@
+package theodolite.commons.flink.serialization;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+import com.google.common.math.Stats;
+
+import java.io.Serializable;
+
+/**
+ * Custom Kryo {@link Serializer} for efficiently transmitting {@link Stats} objects between Flink instances.
+ */
+public class StatsSerializer extends Serializer<Stats> implements Serializable {
+
+  private static final long serialVersionUID = -1276866176534267373L; //NOPMD
+
+  @Override
+  public void write(final Kryo kryo, final Output output, final Stats object) {
+    final byte[] data = object.toByteArray();
+    output.writeInt(data.length);
+    output.writeBytes(data);
+  }
+
+  @Override
+  public Stats read(final Kryo kryo, final Input input, final Class<Stats> type) {
+    final int numBytes = input.readInt();
+    return Stats.fromByteArray(input.readBytes(numBytes));
+  }
+}
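
The serializer only takes effect once it is registered with the job's ExecutionConfig; a sketch, assuming env is the job's StreamExecutionEnvironment:

    // Let Kryo use the custom serializer for Guava Stats instances.
    env.getConfig().registerTypeWithKryoSerializer(Stats.class, new StatsSerializer());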
diff --git a/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/util/SerializableSupplier.java b/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/util/SerializableSupplier.java
new file mode 100644
index 0000000000000000000000000000000000000000..bcc51a9ef7b8bb0f36398ea401f1d2c898472081
--- /dev/null
+++ b/theodolite-benchmarks/flink-commons/src/main/java/theodolite/commons/flink/util/SerializableSupplier.java
@@ -0,0 +1,13 @@
+package theodolite.commons.flink.util;
+
+import java.io.Serializable;
+import java.util.function.Supplier;
+
+/**
+ * Interface for {@link Supplier}s which are serializable.
+ *
+ * @param <T> the type of results supplied by this supplier
+ */
+public interface SerializableSupplier<T> extends Supplier<T>, Serializable { // NOPMD
+  // Nothing to do here
+}
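
Since the interface adds no methods, any serializable lambda or method reference qualifies, for example:

    // The target type alone makes the method reference serializable.
    final SerializableSupplier<Serde<String>> keySerdeSupplier = Serdes::String;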
diff --git a/theodolite-benchmarks/gradle/wrapper/gradle-wrapper.jar b/theodolite-benchmarks/gradle/wrapper/gradle-wrapper.jar
new file mode 100644
index 0000000000000000000000000000000000000000..e708b1c023ec8b20f512888fe07c5bd3ff77bb8f
Binary files /dev/null and b/theodolite-benchmarks/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/benchmarks/gradle/wrapper/gradle-wrapper.properties b/theodolite-benchmarks/gradle/wrapper/gradle-wrapper.properties
similarity index 92%
rename from benchmarks/gradle/wrapper/gradle-wrapper.properties
rename to theodolite-benchmarks/gradle/wrapper/gradle-wrapper.properties
index 4d9ca1649142b0c20144adce78e2472e2da01c30..442d9132ea32808ad980df4bd233b359f76341a7 100644
--- a/benchmarks/gradle/wrapper/gradle-wrapper.properties
+++ b/theodolite-benchmarks/gradle/wrapper/gradle-wrapper.properties
@@ -1,5 +1,5 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-6.7.1-bin.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-6.8.3-bin.zip
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
diff --git a/benchmarks/gradlew b/theodolite-benchmarks/gradlew
similarity index 75%
rename from benchmarks/gradlew
rename to theodolite-benchmarks/gradlew
index af6708ff229fda75da4f7cc4da4747217bac4d53..4f906e0c811fc9e230eb44819f509cd0627f2600 100755
--- a/benchmarks/gradlew
+++ b/theodolite-benchmarks/gradlew
@@ -1,5 +1,21 @@
 #!/usr/bin/env sh
 
+#
+# Copyright 2015 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
 ##############################################################################
 ##
 ##  Gradle start up script for UN*X
@@ -28,7 +44,7 @@ APP_NAME="Gradle"
 APP_BASE_NAME=`basename "$0"`
 
 # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
-DEFAULT_JVM_OPTS='"-Xmx64m"'
+DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
 
 # Use the maximum available, or set MAX_FD != -1 to use that value.
 MAX_FD="maximum"
@@ -66,6 +82,7 @@ esac
 
 CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
 
+
 # Determine the Java command to use to start the JVM.
 if [ -n "$JAVA_HOME" ] ; then
     if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
@@ -109,10 +126,11 @@ if $darwin; then
     GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
 fi
 
-# For Cygwin, switch paths to Windows format before running java
-if $cygwin ; then
+# For Cygwin or MSYS, switch paths to Windows format before running java
+if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then
     APP_HOME=`cygpath --path --mixed "$APP_HOME"`
     CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
+
     JAVACMD=`cygpath --unix "$JAVACMD"`
 
     # We build the pattern for arguments to be converted via cygpath
@@ -138,19 +156,19 @@ if $cygwin ; then
         else
             eval `echo args$i`="\"$arg\""
         fi
-        i=$((i+1))
+        i=`expr $i + 1`
     done
     case $i in
-        (0) set -- ;;
-        (1) set -- "$args0" ;;
-        (2) set -- "$args0" "$args1" ;;
-        (3) set -- "$args0" "$args1" "$args2" ;;
-        (4) set -- "$args0" "$args1" "$args2" "$args3" ;;
-        (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
-        (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
-        (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
-        (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
-        (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
+        0) set -- ;;
+        1) set -- "$args0" ;;
+        2) set -- "$args0" "$args1" ;;
+        3) set -- "$args0" "$args1" "$args2" ;;
+        4) set -- "$args0" "$args1" "$args2" "$args3" ;;
+        5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
+        6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
+        7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
+        8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
+        9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
     esac
 fi
 
@@ -159,14 +177,9 @@ save () {
     for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
     echo " "
 }
-APP_ARGS=$(save "$@")
+APP_ARGS=`save "$@"`
 
 # Collect all arguments for the java command, following the shell quoting and substitution rules
 eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
 
-# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong
-if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then
-  cd "$(dirname "$0")"
-fi
-
 exec "$JAVACMD" "$@"
diff --git a/theodolite-benchmarks/gradlew.bat b/theodolite-benchmarks/gradlew.bat
new file mode 100644
index 0000000000000000000000000000000000000000..ac1b06f93825db68fb0c0b5150917f340eaa5d02
--- /dev/null
+++ b/theodolite-benchmarks/gradlew.bat
@@ -0,0 +1,89 @@
+@rem
+@rem Copyright 2015 the original author or authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem      https://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+
+@if "%DEBUG%" == "" @echo off
+@rem ##########################################################################
+@rem
+@rem  Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+set DIRNAME=%~dp0
+if "%DIRNAME%" == "" set DIRNAME=.
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Resolve any "." and ".." in APP_HOME to make it shorter.
+for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if "%ERRORLEVEL%" == "0" goto execute
+
+echo.
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto execute
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
+
+:end
+@rem End local scope for the variables with windows NT shell
+if "%ERRORLEVEL%"=="0" goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+if  not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
+exit /b 1
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega
diff --git a/benchmarks/application-kafkastreams-commons/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/kstreams-commons/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from benchmarks/application-kafkastreams-commons/.settings/org.eclipse.jdt.ui.prefs
rename to theodolite-benchmarks/kstreams-commons/.settings/org.eclipse.jdt.ui.prefs
diff --git a/benchmarks/uc1-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/kstreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from benchmarks/uc1-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to theodolite-benchmarks/kstreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/benchmarks/uc1-application/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/kstreams-commons/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from benchmarks/uc1-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to theodolite-benchmarks/kstreams-commons/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/theodolite-benchmarks/kstreams-commons/build.gradle b/theodolite-benchmarks/kstreams-commons/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..c5a880acd4377056cc0b0f06b33a2d74c9f87c4e
--- /dev/null
+++ b/theodolite-benchmarks/kstreams-commons/build.gradle
@@ -0,0 +1,24 @@
+plugins {
+    id 'theodolite.java-commons'
+}
+
+repositories {
+  jcenter()
+  maven {
+    url "https://oss.sonatype.org/content/repositories/snapshots/"
+  }
+  maven {
+    url 'https://packages.confluent.io/maven/'
+  }
+}
+
+dependencies {
+  // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
+  // implementation 'org.slf4j:slf4j-simple:1.7.25'
+  implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
+  implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
+  implementation 'org.apache.kafka:kafka-streams:2.6.0'
+
+  // Use JUnit test framework
+  testImplementation 'junit:junit:4.12'
+}
diff --git a/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java b/theodolite-benchmarks/kstreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
similarity index 100%
rename from benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
rename to theodolite-benchmarks/kstreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
diff --git a/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java b/theodolite-benchmarks/kstreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
similarity index 100%
rename from benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
rename to theodolite-benchmarks/kstreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
diff --git a/benchmarks/uc1-application/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/load-generator-commons/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from benchmarks/uc1-application/.settings/org.eclipse.jdt.ui.prefs
rename to theodolite-benchmarks/load-generator-commons/.settings/org.eclipse.jdt.ui.prefs
diff --git a/benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/load-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to theodolite-benchmarks/load-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/load-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to theodolite-benchmarks/load-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/theodolite-benchmarks/load-generator-commons/build.gradle b/theodolite-benchmarks/load-generator-commons/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..118f3e648f829a3eafe719ddf660d35ac8563574
--- /dev/null
+++ b/theodolite-benchmarks/load-generator-commons/build.gradle
@@ -0,0 +1,26 @@
+plugins {
+    id 'theodolite.java-commons'
+}
+
+repositories {
+  jcenter()
+  maven {
+    url "https://oss.sonatype.org/content/repositories/snapshots/"
+  }
+  maven {
+    url 'https://packages.confluent.io/maven/'
+  }
+}
+
+dependencies {
+  implementation 'com.google.guava:guava:30.1-jre'
+  implementation 'com.hazelcast:hazelcast:4.1.1'
+  implementation 'com.hazelcast:hazelcast-kubernetes:2.2.1'
+  implementation 'org.slf4j:slf4j-simple:1.7.25'
+  implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
+  implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
+  implementation 'org.apache.kafka:kafka-streams:2.6.0' // TODO required?
+
+  // Use JUnit test framework
+  testImplementation 'junit:junit:4.12'
+}
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/BeforeAction.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/BeforeAction.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/BeforeAction.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/BeforeAction.java
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/ClusterConfig.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/ClusterConfig.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/ClusterConfig.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/ClusterConfig.java
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/ConfigurationKeys.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/ConfigurationKeys.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/ConfigurationKeys.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/ConfigurationKeys.java
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/HazelcastRunner.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/HazelcastRunner.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/HazelcastRunner.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/HazelcastRunner.java
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/HazelcastRunnerStateInstance.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/HazelcastRunnerStateInstance.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/HazelcastRunnerStateInstance.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/HazelcastRunnerStateInstance.java
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KafkaRecordSender.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KafkaRecordSender.java
similarity index 93%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KafkaRecordSender.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KafkaRecordSender.java
index dd17234bf1adb1f0fcf3ff3ab134a0743b917369..6e4a43271fbf1e0193c2d39569a0814d1f7935cd 100644
--- a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KafkaRecordSender.java
+++ b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KafkaRecordSender.java
@@ -6,6 +6,7 @@ import org.apache.avro.specific.SpecificRecord;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.Producer;
 import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.errors.SerializationException;
 import org.apache.kafka.common.serialization.StringSerializer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -116,7 +117,13 @@ public class KafkaRecordSender<T extends SpecificRecord> implements RecordSender
             this.keyAccessor.apply(monitoringRecord), monitoringRecord);
 
     LOGGER.debug("Send record to Kafka topic {}: {}", this.topic, record);
-    this.producer.send(record);
+    try {
+      this.producer.send(record);
+    } catch (final SerializationException e) {
+      LOGGER.warn(
+          "Record could not be serialized and thus not sent to Kafka due to exception. Skipping this record.", // NOCS
+          e);
+    }
   }
 
   public void terminate() {
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KeySpace.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KeySpace.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KeySpace.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KeySpace.java
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGenerator.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGenerator.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGenerator.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGenerator.java
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGeneratorConfig.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGeneratorConfig.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGeneratorConfig.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGeneratorConfig.java
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGeneratorExecution.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGeneratorExecution.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGeneratorExecution.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGeneratorExecution.java
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/MessageGenerator.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/MessageGenerator.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/MessageGenerator.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/MessageGenerator.java
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/RecordGenerator.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/RecordGenerator.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/RecordGenerator.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/RecordGenerator.java
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/RecordSender.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/RecordSender.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/RecordSender.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/RecordSender.java
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/TitanMessageGeneratorFactory.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/TitanMessageGeneratorFactory.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/TitanMessageGeneratorFactory.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/TitanMessageGeneratorFactory.java
diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/WorkloadDefinition.java b/theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/WorkloadDefinition.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/WorkloadDefinition.java
rename to theodolite-benchmarks/load-generator-commons/src/main/java/theodolite/commons/workloadgeneration/WorkloadDefinition.java
diff --git a/benchmarks/workload-generator-commons/src/test/java/theodolite/commons/workloadgeneration/KeySpaceTest.java b/theodolite-benchmarks/load-generator-commons/src/test/java/theodolite/commons/workloadgeneration/KeySpaceTest.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/test/java/theodolite/commons/workloadgeneration/KeySpaceTest.java
rename to theodolite-benchmarks/load-generator-commons/src/test/java/theodolite/commons/workloadgeneration/KeySpaceTest.java
diff --git a/benchmarks/workload-generator-commons/src/test/java/theodolite/commons/workloadgeneration/WorkloadDefinitionTest.java b/theodolite-benchmarks/load-generator-commons/src/test/java/theodolite/commons/workloadgeneration/WorkloadDefinitionTest.java
similarity index 100%
rename from benchmarks/workload-generator-commons/src/test/java/theodolite/commons/workloadgeneration/WorkloadDefinitionTest.java
rename to theodolite-benchmarks/load-generator-commons/src/test/java/theodolite/commons/workloadgeneration/WorkloadDefinitionTest.java
diff --git a/theodolite-benchmarks/settings.gradle b/theodolite-benchmarks/settings.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..5602e816bb21dce72162b085de99836b8f9aea1e
--- /dev/null
+++ b/theodolite-benchmarks/settings.gradle
@@ -0,0 +1,21 @@
+rootProject.name = 'theodolite-benchmarks'
+
+include 'load-generator-commons'
+include 'kstreams-commons'
+include 'flink-commons'
+
+include 'uc1-load-generator'
+include 'uc1-kstreams'
+include 'uc1-flink'
+
+include 'uc2-load-generator'
+include 'uc2-kstreams'
+include 'uc2-flink'
+
+include 'uc3-load-generator'
+include 'uc3-kstreams'
+include 'uc3-flink'
+
+include 'uc4-load-generator'
+include 'uc4-kstreams'
+include 'uc4-flink'
diff --git a/benchmarks/uc2-application/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/uc1-flink/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from benchmarks/uc2-application/.settings/org.eclipse.jdt.ui.prefs
rename to theodolite-benchmarks/uc1-flink/.settings/org.eclipse.jdt.ui.prefs
diff --git a/benchmarks/uc2-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/uc1-flink/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from benchmarks/uc2-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to theodolite-benchmarks/uc1-flink/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/benchmarks/uc2-application/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/uc1-flink/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from benchmarks/uc2-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to theodolite-benchmarks/uc1-flink/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/theodolite-benchmarks/uc1-flink/Dockerfile b/theodolite-benchmarks/uc1-flink/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..795b9e343a03cf0209e1625f5cbc3d45dcb77cda
--- /dev/null
+++ b/theodolite-benchmarks/uc1-flink/Dockerfile
@@ -0,0 +1,3 @@
+FROM flink:1.12-scala_2.12-java11
+
+ADD build/libs/uc1-flink-all.jar /opt/flink/usrlib/artifacts/uc1-flink-all.jar
diff --git a/theodolite-benchmarks/uc1-flink/build.gradle b/theodolite-benchmarks/uc1-flink/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..8a2a359c4840e67581f7bc24f1544ff519f82525
--- /dev/null
+++ b/theodolite-benchmarks/uc1-flink/build.gradle
@@ -0,0 +1,5 @@
+plugins {
+  id 'theodolite.flink'
+}
+
+mainClassName = "theodolite.uc1.application.HistoryServiceFlinkJob"
diff --git a/theodolite-benchmarks/uc1-flink/src/main/java/theodolite/uc1/application/ConfigurationKeys.java b/theodolite-benchmarks/uc1-flink/src/main/java/theodolite/uc1/application/ConfigurationKeys.java
new file mode 100644
index 0000000000000000000000000000000000000000..382525cfe75f82dbbe8fbcc85308b0e7788a43bc
--- /dev/null
+++ b/theodolite-benchmarks/uc1-flink/src/main/java/theodolite/uc1/application/ConfigurationKeys.java
@@ -0,0 +1,26 @@
+package theodolite.uc1.application;
+
+/**
+ * Keys to access configuration parameters.
+ */
+public final class ConfigurationKeys {
+
+  public static final String APPLICATION_NAME = "application.name";
+
+  public static final String APPLICATION_VERSION = "application.version";
+
+  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
+
+  public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
+
+  public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
+
+  public static final String SCHEMA_REGISTRY_URL = "schema.registry.url";
+
+  public static final String CHECKPOINTING = "checkpointing";
+
+  public static final String PARALLELISM = "parallelism";
+
+  private ConfigurationKeys() {}
+
+}
diff --git a/theodolite-benchmarks/uc1-flink/src/main/java/theodolite/uc1/application/GsonMapper.java b/theodolite-benchmarks/uc1-flink/src/main/java/theodolite/uc1/application/GsonMapper.java
new file mode 100644
index 0000000000000000000000000000000000000000..831db7fe63be6529e6b7ba299dca92b138ff7d13
--- /dev/null
+++ b/theodolite-benchmarks/uc1-flink/src/main/java/theodolite/uc1/application/GsonMapper.java
@@ -0,0 +1,22 @@
+package theodolite.uc1.application;
+
+import com.google.gson.Gson;
+import org.apache.flink.api.common.functions.MapFunction;
+import titan.ccp.model.records.ActivePowerRecord;
+
+/**
+ * {@link MapFunction} which maps {@link ActivePowerRecord}s to their representation as JSON
+ * strings.
+ */
+public class GsonMapper implements MapFunction<ActivePowerRecord, String> {
+
+  private static final long serialVersionUID = -5263671231838353747L; // NOPMD
+
+  private static final Gson GSON = new Gson();
+
+  @Override
+  public String map(final ActivePowerRecord value) throws Exception {
+    return GSON.toJson(value);
+  }
+
+}
diff --git a/theodolite-benchmarks/uc1-flink/src/main/java/theodolite/uc1/application/HistoryServiceFlinkJob.java b/theodolite-benchmarks/uc1-flink/src/main/java/theodolite/uc1/application/HistoryServiceFlinkJob.java
new file mode 100644
index 0000000000000000000000000000000000000000..0cb132e526486e71409736b843dd25bdfa52da4a
--- /dev/null
+++ b/theodolite-benchmarks/uc1-flink/src/main/java/theodolite/uc1/application/HistoryServiceFlinkJob.java
@@ -0,0 +1,91 @@
+package theodolite.uc1.application;
+
+import org.apache.commons.configuration2.Configuration;
+import org.apache.flink.api.common.typeinfo.Types;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import theodolite.commons.flink.KafkaConnectorFactory;
+import titan.ccp.common.configuration.ServiceConfigurations;
+import titan.ccp.model.records.ActivePowerRecord;
+
+/**
+ * The History microservice implemented as a Flink job.
+ */
+public final class HistoryServiceFlinkJob {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(HistoryServiceFlinkJob.class);
+
+  private final Configuration config = ServiceConfigurations.createWithDefaults();
+  private final StreamExecutionEnvironment env;
+  private final String applicationId;
+
+  /**
+   * Create a new instance of the {@link HistoryServiceFlinkJob}.
+   */
+  public HistoryServiceFlinkJob() {
+    final String applicationName = this.config.getString(ConfigurationKeys.APPLICATION_NAME);
+    final String applicationVersion = this.config.getString(ConfigurationKeys.APPLICATION_VERSION);
+    this.applicationId = applicationName + "-" + applicationVersion;
+
+    this.env = StreamExecutionEnvironment.getExecutionEnvironment();
+
+    this.configureEnv();
+
+    this.buildPipeline();
+  }
+
+  private void configureEnv() {
+    final boolean checkpointing = this.config.getBoolean(ConfigurationKeys.CHECKPOINTING, true);
+    final int commitIntervalMs = this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS);
+    if (checkpointing) {
+      this.env.enableCheckpointing(commitIntervalMs);
+    }
+
+    // Parallelism
+    final Integer parallelism = this.config.getInteger(ConfigurationKeys.PARALLELISM, null);
+    if (parallelism != null) {
+      LOGGER.info("Set parallelism: {}.", parallelism);
+      this.env.setParallelism(parallelism);
+    }
+
+  }
+
+  private void buildPipeline() {
+    final String kafkaBroker = this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS);
+    final String schemaRegistryUrl = this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL);
+    final String inputTopic = this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC);
+    final boolean checkpointing = this.config.getBoolean(ConfigurationKeys.CHECKPOINTING, true);
+
+    final KafkaConnectorFactory kafkaConnector = new KafkaConnectorFactory(
+        this.applicationId, kafkaBroker, checkpointing, schemaRegistryUrl);
+
+    final FlinkKafkaConsumer<ActivePowerRecord> kafkaConsumer =
+        kafkaConnector.createConsumer(inputTopic, ActivePowerRecord.class);
+
+    final DataStream<ActivePowerRecord> stream = this.env.addSource(kafkaConsumer);
+
+    stream
+        // .rebalance()
+        .map(new GsonMapper())
+        .flatMap((record, c) -> LOGGER.info("Record: {}", record))
+        .returns(Types.GENERIC(Object.class)); // Will never be used
+  }
+
+  /**
+   * Start running this microservice.
+   */
+  public void run() {
+    try {
+      this.env.execute(this.applicationId);
+    } catch (final Exception e) { // NOPMD Execution thrown by Flink
+      LOGGER.error("An error occured while running this job.", e);
+    }
+  }
+
+  public static void main(final String[] args) {
+    new HistoryServiceFlinkJob().run();
+  }
+}
diff --git a/theodolite-benchmarks/uc1-flink/src/main/resources/META-INF/application.properties b/theodolite-benchmarks/uc1-flink/src/main/resources/META-INF/application.properties
new file mode 100644
index 0000000000000000000000000000000000000000..905e501b8cb66712f2b245470d96803987a9b93b
--- /dev/null
+++ b/theodolite-benchmarks/uc1-flink/src/main/resources/META-INF/application.properties
@@ -0,0 +1,12 @@
+application.name=theodolite-uc1-application
+application.version=0.0.1
+
+kafka.bootstrap.servers=localhost:9092
+kafka.input.topic=input
+kafka.output.topic=output
+
+schema.registry.url=http://localhost:8081
+
+num.threads=1
+commit.interval.ms=1000
+cache.max.bytes.buffering=-1
diff --git a/benchmarks/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/uc1-kstreams/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from benchmarks/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to theodolite-benchmarks/uc1-kstreams/.settings/org.eclipse.jdt.ui.prefs
diff --git a/benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/uc1-kstreams/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to theodolite-benchmarks/uc1-kstreams/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/uc1-kstreams/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to theodolite-benchmarks/uc1-kstreams/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/benchmarks/uc1-application/Dockerfile b/theodolite-benchmarks/uc1-kstreams/Dockerfile
similarity index 53%
rename from benchmarks/uc1-application/Dockerfile
rename to theodolite-benchmarks/uc1-kstreams/Dockerfile
index 09c36f42afe730a2fc6ba59bbc2082aa8b715f68..bfabd8ff2073e03beaecba847d5cf6cd1722224f 100644
--- a/benchmarks/uc1-application/Dockerfile
+++ b/theodolite-benchmarks/uc1-kstreams/Dockerfile
@@ -1,7 +1,7 @@
 FROM openjdk:11-slim
 
-ADD build/distributions/uc1-application.tar /
+ADD build/distributions/uc1-kstreams.tar /
 
 
 CMD  JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
-     /uc1-application/bin/uc1-application
\ No newline at end of file
+     /uc1-kstreams/bin/uc1-kstreams
\ No newline at end of file
diff --git a/benchmarks/uc1-application/build.gradle b/theodolite-benchmarks/uc1-kstreams/build.gradle
similarity index 57%
rename from benchmarks/uc1-application/build.gradle
rename to theodolite-benchmarks/uc1-kstreams/build.gradle
index 3b197e85116f41dde5574d9253d60e1146fe44a2..74cfb450ec80759f60582c25ab844e3398d5bf02 100644
--- a/benchmarks/uc1-application/build.gradle
+++ b/theodolite-benchmarks/uc1-kstreams/build.gradle
@@ -1 +1,5 @@
+plugins {
+  id 'theodolite.kstreams'
+}
+
 mainClassName = "theodolite.uc1.application.HistoryService"
diff --git a/benchmarks/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java b/theodolite-benchmarks/uc1-kstreams/src/main/java/theodolite/uc1/application/HistoryService.java
similarity index 100%
rename from benchmarks/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
rename to theodolite-benchmarks/uc1-kstreams/src/main/java/theodolite/uc1/application/HistoryService.java
diff --git a/benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java b/theodolite-benchmarks/uc1-kstreams/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
similarity index 90%
rename from benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
rename to theodolite-benchmarks/uc1-kstreams/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
index 75c833aa722654395b1adc6f739395eea5256820..427a838f45f6807ede00dcb68ebf8c5580f28ce6 100644
--- a/benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
+++ b/theodolite-benchmarks/uc1-kstreams/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
@@ -17,11 +17,11 @@ import titan.ccp.model.records.ActivePowerRecord;
 public class TopologyBuilder {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(TopologyBuilder.class);
+  private static final Gson GSON = new Gson();
 
   private final String inputTopic;
   private final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory;
 
-  private final Gson gson = new Gson();
   private final StreamsBuilder builder = new StreamsBuilder();
 
 
@@ -42,8 +42,8 @@ public class TopologyBuilder {
         .stream(this.inputTopic, Consumed.with(
             Serdes.String(),
             this.srAvroSerdeFactory.<ActivePowerRecord>forValues()))
-        .mapValues(v -> this.gson.toJson(v))
-        .foreach((k, v) -> LOGGER.info("Key: " + k + " Value: " + v));
+        .mapValues(v -> GSON.toJson(v))
+        .foreach((k, record) -> LOGGER.info("Record: {}", record));
 
     return this.builder.build(properties);
   }
diff --git a/benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java b/theodolite-benchmarks/uc1-kstreams/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
similarity index 100%
rename from benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
rename to theodolite-benchmarks/uc1-kstreams/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
diff --git a/benchmarks/uc1-application/src/main/resources/META-INF/application.properties b/theodolite-benchmarks/uc1-kstreams/src/main/resources/META-INF/application.properties
similarity index 76%
rename from benchmarks/uc1-application/src/main/resources/META-INF/application.properties
rename to theodolite-benchmarks/uc1-kstreams/src/main/resources/META-INF/application.properties
index b46e6246e248cc524c5b6249348c76ded6ec468b..e3371cc87e20e85e6e8c327955537e6e49dab86e 100644
--- a/benchmarks/uc1-application/src/main/resources/META-INF/application.properties
+++ b/theodolite-benchmarks/uc1-kstreams/src/main/resources/META-INF/application.properties
@@ -4,5 +4,5 @@ application.version=0.0.1
 kafka.bootstrap.servers=localhost:9092
 kafka.input.topic=input
 
-schema.registry.url=http://localhost:8091
+schema.registry.url=http://localhost:8081
 
diff --git a/benchmarks/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/uc1-load-generator/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from benchmarks/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to theodolite-benchmarks/uc1-load-generator/.settings/org.eclipse.jdt.ui.prefs
diff --git a/benchmarks/uc3-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/uc1-load-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from benchmarks/uc3-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to theodolite-benchmarks/uc1-load-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/benchmarks/uc3-application/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/uc1-load-generator/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from benchmarks/uc3-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to theodolite-benchmarks/uc1-load-generator/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/theodolite-benchmarks/uc1-load-generator/Dockerfile b/theodolite-benchmarks/uc1-load-generator/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..2a9195d3b461219f2e0b1805ff4c7f50412132e1
--- /dev/null
+++ b/theodolite-benchmarks/uc1-load-generator/Dockerfile
@@ -0,0 +1,6 @@
+FROM openjdk:11-slim
+
+ADD build/distributions/uc1-load-generator.tar /
+
+CMD  JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
+     /uc1-load-generator/bin/uc1-load-generator
\ No newline at end of file
diff --git a/benchmarks/uc1-workload-generator/build.gradle b/theodolite-benchmarks/uc1-load-generator/build.gradle
similarity index 56%
rename from benchmarks/uc1-workload-generator/build.gradle
rename to theodolite-benchmarks/uc1-load-generator/build.gradle
index 9cc0bdbf01032efa3b251db06a2837cc9b920675..aadd4796d86dd46ca6094b00479f9f8483fc7e15 100644
--- a/benchmarks/uc1-workload-generator/build.gradle
+++ b/theodolite-benchmarks/uc1-load-generator/build.gradle
@@ -1 +1,5 @@
+plugins {
+  id 'theodolite.load-generator'
+}
+
 mainClassName = "theodolite.uc1.workloadgenerator.LoadGenerator"
diff --git a/benchmarks/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java b/theodolite-benchmarks/uc1-load-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from benchmarks/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
rename to theodolite-benchmarks/uc1-load-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
diff --git a/benchmarks/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/uc2-flink/.settings/org.eclipse.jdt.ui.prefs
similarity index 99%
rename from benchmarks/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to theodolite-benchmarks/uc2-flink/.settings/org.eclipse.jdt.ui.prefs
index fa98ca63d77bdee891150bd6713f70197a75cefc..4d01df75552c562406705858b6368ecf59d6e82f 100644
--- a/benchmarks/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs
+++ b/theodolite-benchmarks/uc2-flink/.settings/org.eclipse.jdt.ui.prefs
@@ -66,6 +66,7 @@ org.eclipse.jdt.ui.ignorelowercasenames=true
 org.eclipse.jdt.ui.importorder=;
 org.eclipse.jdt.ui.ondemandthreshold=99
 org.eclipse.jdt.ui.staticondemandthreshold=99
+org.eclipse.jdt.ui.text.custom_code_templates=
 sp_cleanup.add_default_serial_version_id=true
 sp_cleanup.add_generated_serial_version_id=false
 sp_cleanup.add_missing_annotations=true
diff --git a/benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/uc2-flink/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to theodolite-benchmarks/uc2-flink/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/uc2-flink/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to theodolite-benchmarks/uc2-flink/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/theodolite-benchmarks/uc2-flink/Dockerfile b/theodolite-benchmarks/uc2-flink/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..537ab28e2d4e5fb8edfc2760142acc33cc49b91d
--- /dev/null
+++ b/theodolite-benchmarks/uc2-flink/Dockerfile
@@ -0,0 +1,3 @@
+FROM flink:1.12-scala_2.12-java11
+
+ADD build/libs/uc2-flink-all.jar /opt/flink/usrlib/artifacts/uc2-flink-all.jar
\ No newline at end of file
diff --git a/theodolite-benchmarks/uc2-flink/build.gradle b/theodolite-benchmarks/uc2-flink/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..680b802ce527d538062658874146ce1f0bd3b1e8
--- /dev/null
+++ b/theodolite-benchmarks/uc2-flink/build.gradle
@@ -0,0 +1,17 @@
+plugins {
+  id 'theodolite.flink'
+}
+
+allprojects {
+  repositories {
+    maven {
+      url 'https://packages.confluent.io/maven/'
+    }
+  }
+}
+
+dependencies {
+    compile('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT')
+}
+
+mainClassName = "theodolite.uc2.application.HistoryServiceFlinkJob"
diff --git a/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/ConfigurationKeys.java b/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/ConfigurationKeys.java
new file mode 100644
index 0000000000000000000000000000000000000000..e8261062689ce4c586a4e6fbde02878a28f48e97
--- /dev/null
+++ b/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/ConfigurationKeys.java
@@ -0,0 +1,37 @@
+package theodolite.uc2.application;
+
+/**
+ * Keys to access configuration parameters.
+ */
+public final class ConfigurationKeys {
+
+  public static final String APPLICATION_NAME = "application.name";
+
+  public static final String APPLICATION_VERSION = "application.version";
+
+  public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
+
+  public static final String KAFKA_OUTPUT_TOPIC = "kafka.output.topic";
+
+  public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
+
+  public static final String SCHEMA_REGISTRY_URL = "schema.registry.url";
+
+  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
+
+  public static final String KAFKA_WINDOW_DURATION_MINUTES = "kafka.window.duration.minutes";
+
+  public static final String FLINK_STATE_BACKEND = "flink.state.backend";
+
+  public static final String FLINK_STATE_BACKEND_PATH = "flink.state.backend.path";
+
+  public static final String FLINK_STATE_BACKEND_MEMORY_SIZE = // NOPMD
+      "flink.state.backend.memory.size";
+
+  public static final String CHECKPOINTING = "checkpointing";
+
+  public static final String PARALLELISM = "parallelism";
+
+  private ConfigurationKeys() {}
+
+}
diff --git a/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/HistoryServiceFlinkJob.java b/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/HistoryServiceFlinkJob.java
new file mode 100644
index 0000000000000000000000000000000000000000..d156d895d86bb01a31f96e08764df8b8df743c4d
--- /dev/null
+++ b/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/HistoryServiceFlinkJob.java
@@ -0,0 +1,135 @@
+package theodolite.uc2.application;
+
+import com.google.common.math.Stats;
+import org.apache.commons.configuration2.Configuration;
+import org.apache.flink.api.common.typeinfo.Types;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.runtime.state.StateBackend;
+import org.apache.flink.streaming.api.TimeCharacteristic;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
+import org.apache.flink.streaming.api.windowing.time.Time;
+import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
+import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
+import org.apache.kafka.common.serialization.Serdes;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import theodolite.commons.flink.KafkaConnectorFactory;
+import theodolite.commons.flink.StateBackends;
+import theodolite.commons.flink.serialization.StatsSerializer;
+import titan.ccp.common.configuration.ServiceConfigurations;
+import titan.ccp.model.records.ActivePowerRecord;
+
+
+/**
+ * The History microservice implemented as a Flink job.
+ */
+public final class HistoryServiceFlinkJob {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(HistoryServiceFlinkJob.class);
+
+  private final Configuration config = ServiceConfigurations.createWithDefaults();
+  private final StreamExecutionEnvironment env;
+  private final String applicationId;
+
+  /**
+   * Create a new instance of the {@link HistoryServiceFlinkJob}.
+   */
+  public HistoryServiceFlinkJob() {
+    final String applicationName = this.config.getString(ConfigurationKeys.APPLICATION_NAME);
+    final String applicationVersion = this.config.getString(ConfigurationKeys.APPLICATION_VERSION);
+    this.applicationId = applicationName + "-" + applicationVersion;
+
+    this.env = StreamExecutionEnvironment.getExecutionEnvironment();
+
+    this.configureEnv();
+
+    this.buildPipeline();
+  }
+
+  private void configureEnv() {
+    this.env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
+
+    final boolean checkpointing = this.config.getBoolean(ConfigurationKeys.CHECKPOINTING, true);
+    final int commitIntervalMs = this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS);
+    if (checkpointing) {
+      this.env.enableCheckpointing(commitIntervalMs);
+    }
+
+    // Parallelism
+    final Integer parallelism = this.config.getInteger(ConfigurationKeys.PARALLELISM, null);
+    if (parallelism != null) {
+      LOGGER.info("Set parallelism: {}.", parallelism);
+      this.env.setParallelism(parallelism);
+    }
+
+    // State Backend
+    final StateBackend stateBackend = StateBackends.fromConfiguration(this.config);
+    this.env.setStateBackend(stateBackend);
+
+    this.configureSerializers();
+  }
+
+  private void configureSerializers() {
+    this.env.getConfig().registerTypeWithKryoSerializer(Stats.class, new StatsSerializer());
+    this.env.getConfig().getRegisteredTypesWithKryoSerializers()
+        .forEach((c, s) -> LOGGER.info("Class " + c.getName() + " registered with serializer "
+            + s.getSerializer().getClass().getName()));
+
+  }
+
+  private void buildPipeline() {
+    final String kafkaBroker = this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS);
+    final String schemaRegistryUrl = this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL);
+    final String inputTopic = this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC);
+    final String outputTopic = this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC);
+    final int windowDurationMinutes =
+        this.config.getInt(ConfigurationKeys.KAFKA_WINDOW_DURATION_MINUTES);
+    final Time windowDuration = Time.minutes(windowDurationMinutes);
+    final boolean checkpointing = this.config.getBoolean(ConfigurationKeys.CHECKPOINTING, true);
+
+    final KafkaConnectorFactory kafkaConnector = new KafkaConnectorFactory(
+        this.applicationId, kafkaBroker, checkpointing, schemaRegistryUrl);
+
+    final FlinkKafkaConsumer<ActivePowerRecord> kafkaSource =
+        kafkaConnector.createConsumer(inputTopic, ActivePowerRecord.class);
+
+    final FlinkKafkaProducer<Tuple2<String, String>> kafkaSink =
+        kafkaConnector.createProducer(outputTopic,
+            Serdes::String,
+            Serdes::String,
+            Types.TUPLE(Types.STRING, Types.STRING));
+
+    this.env
+        .addSource(kafkaSource).name("[Kafka Consumer] Topic: " + inputTopic)
+        // .rebalance()
+        .keyBy(ActivePowerRecord::getIdentifier)
+        .window(TumblingEventTimeWindows.of(windowDuration))
+        .aggregate(new StatsAggregateFunction(), new StatsProcessWindowFunction())
+        .map(t -> {
+          final String key = t.f0;
+          final String value = t.f1.toString();
+          LOGGER.info("{}: {}", key, value);
+          return new Tuple2<>(key, value);
+        }).name("map").returns(Types.TUPLE(Types.STRING, Types.STRING))
+        .addSink(kafkaSink).name("[Kafka Producer] Topic: " + outputTopic);
+  }
+
+
+  /**
+   * Start running this microservice.
+   */
+  public void run() {
+    LOGGER.info("Execution plan: {}", this.env.getExecutionPlan());
+
+    try {
+      this.env.execute(this.applicationId);
+    } catch (final Exception e) { // NOPMD Exception thrown by Flink
+      LOGGER.error("An error occurred while running this job.", e);
+    }
+  }
+
+  public static void main(final String[] args) {
+    new HistoryServiceFlinkJob().run();
+  }
+}
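
One detail in buildPipeline() above: the explicit .returns(Types.TUPLE(Types.STRING, Types.STRING)) after the map lambda is required because Java erases the lambda's generic tuple parameters, so Flink cannot infer the produced type on its own. A self-contained sketch of the idiom:

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.fromElements(new Tuple2<>("a", 1))
    .map(t -> new Tuple2<>(t.f0, t.f1.toString()))
    // without returns(), Flink rejects the lambda with an InvalidTypesException
    .returns(Types.TUPLE(Types.STRING, Types.STRING));
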
diff --git a/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/StatsAggregateFunction.java b/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/StatsAggregateFunction.java
new file mode 100644
index 0000000000000000000000000000000000000000..7bd090de819ce0c0c73687bd53a191b66ae31ed9
--- /dev/null
+++ b/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/StatsAggregateFunction.java
@@ -0,0 +1,38 @@
+package theodolite.uc2.application;
+
+import com.google.common.math.Stats;
+import com.google.common.math.StatsAccumulator;
+import org.apache.flink.api.common.functions.AggregateFunction;
+import theodolite.uc2.application.util.StatsFactory;
+import titan.ccp.model.records.ActivePowerRecord;
+
+/**
+ * Statistical aggregation of {@link ActivePowerRecord}s using {@link Stats}.
+ */
+public class StatsAggregateFunction implements AggregateFunction<ActivePowerRecord, Stats, Stats> {
+
+  private static final long serialVersionUID = -8873572990921515499L; // NOPMD
+
+  @Override
+  public Stats createAccumulator() {
+    return Stats.of();
+  }
+
+  @Override
+  public Stats add(final ActivePowerRecord value, final Stats accumulator) {
+    return StatsFactory.accumulate(accumulator, value.getValueInW());
+  }
+
+  @Override
+  public Stats getResult(final Stats accumulator) {
+    return accumulator;
+  }
+
+  @Override
+  public Stats merge(final Stats a, final Stats b) {
+    final StatsAccumulator statsAccumulator = new StatsAccumulator();
+    statsAccumulator.addAll(a);
+    statsAccumulator.addAll(b);
+    return statsAccumulator.snapshot();
+  }
+}
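
merge() combines partial aggregates from separate window panes. Since Guava's Stats is immutable, the combination goes through a StatsAccumulator; a standalone sketch with hypothetical values:

import com.google.common.math.Stats;
import com.google.common.math.StatsAccumulator;

final Stats a = Stats.of(10.0, 20.0);
final Stats b = Stats.of(30.0);
final StatsAccumulator acc = new StatsAccumulator();
acc.addAll(a);
acc.addAll(b);
final Stats merged = acc.snapshot(); // count() == 3, mean() == 20.0
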
diff --git a/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/StatsProcessWindowFunction.java b/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/StatsProcessWindowFunction.java
new file mode 100644
index 0000000000000000000000000000000000000000..d422c37b667d9d3309f0dd858758db29051807b9
--- /dev/null
+++ b/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/StatsProcessWindowFunction.java
@@ -0,0 +1,24 @@
+package theodolite.uc2.application;
+
+import com.google.common.math.Stats;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
+import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
+import org.apache.flink.util.Collector;
+
+/**
+ * A {@link ProcessWindowFunction} that forwards a computed {@link Stats} object along with its
+ * associated key.
+ */
+public class StatsProcessWindowFunction
+    extends ProcessWindowFunction<Stats, Tuple2<String, Stats>, String, TimeWindow> {
+
+  private static final long serialVersionUID = 4363099880614593379L; // NOPMD
+
+  @Override
+  public void process(final String key, final Context context, final Iterable<Stats> elements,
+      final Collector<Tuple2<String, Stats>> out) {
+    final Stats stats = elements.iterator().next();
+    out.collect(new Tuple2<>(key, stats));
+  }
+}
diff --git a/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/util/StatsFactory.java b/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/util/StatsFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..9697108eb8dacabf925f06067199a41eb0658dbe
--- /dev/null
+++ b/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/util/StatsFactory.java
@@ -0,0 +1,23 @@
+package theodolite.uc2.application.util;
+
+import com.google.common.math.Stats;
+import com.google.common.math.StatsAccumulator;
+
+/**
+ * Factory methods for working with {@link Stats}.
+ */
+public final class StatsFactory {
+
+  private StatsFactory() {}
+
+  /**
+   * Add a value to a {@link Stats} object.
+   */
+  public static Stats accumulate(final Stats stats, final double value) {
+    final StatsAccumulator statsAccumulator = new StatsAccumulator();
+    statsAccumulator.addAll(stats);
+    statsAccumulator.add(value);
+    return statsAccumulator.snapshot();
+  }
+
+}
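
Because Stats is immutable, accumulate() rebuilds an accumulator on each call and returns a fresh snapshot. A usage sketch with hypothetical values:

import com.google.common.math.Stats;

Stats stats = Stats.of(); // empty, as returned by StatsAggregateFunction.createAccumulator()
stats = StatsFactory.accumulate(stats, 10.0);
stats = StatsFactory.accumulate(stats, 20.0);
// stats.count() == 2, stats.mean() == 15.0
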
diff --git a/theodolite-benchmarks/uc2-flink/src/main/resources/META-INF/application.properties b/theodolite-benchmarks/uc2-flink/src/main/resources/META-INF/application.properties
new file mode 100644
index 0000000000000000000000000000000000000000..f971390984ee41be1fce54e62f4f43ee2b9c02da
--- /dev/null
+++ b/theodolite-benchmarks/uc2-flink/src/main/resources/META-INF/application.properties
@@ -0,0 +1,11 @@
+application.name=theodolite-uc2-application
+application.version=0.0.1
+
+kafka.bootstrap.servers=localhost:9092
+kafka.input.topic=input
+kafka.output.topic=output
+schema.registry.url=http://localhost:8081
+num.threads=1
+commit.interval.ms=100
+cache.max.bytes.buffering=-1
+kafka.window.duration.minutes=1
\ No newline at end of file
diff --git a/benchmarks/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/uc2-kstreams/.settings/org.eclipse.jdt.ui.prefs
similarity index 99%
rename from benchmarks/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
rename to theodolite-benchmarks/uc2-kstreams/.settings/org.eclipse.jdt.ui.prefs
index fa98ca63d77bdee891150bd6713f70197a75cefc..4d01df75552c562406705858b6368ecf59d6e82f 100644
--- a/benchmarks/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
+++ b/theodolite-benchmarks/uc2-kstreams/.settings/org.eclipse.jdt.ui.prefs
@@ -66,6 +66,7 @@ org.eclipse.jdt.ui.ignorelowercasenames=true
 org.eclipse.jdt.ui.importorder=;
 org.eclipse.jdt.ui.ondemandthreshold=99
 org.eclipse.jdt.ui.staticondemandthreshold=99
+org.eclipse.jdt.ui.text.custom_code_templates=
 sp_cleanup.add_default_serial_version_id=true
 sp_cleanup.add_generated_serial_version_id=false
 sp_cleanup.add_missing_annotations=true
diff --git a/benchmarks/uc4-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/uc2-kstreams/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from benchmarks/uc4-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to theodolite-benchmarks/uc2-kstreams/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/benchmarks/uc4-application/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/uc2-kstreams/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from benchmarks/uc4-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to theodolite-benchmarks/uc2-kstreams/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/benchmarks/uc2-application/Dockerfile b/theodolite-benchmarks/uc2-kstreams/Dockerfile
similarity index 53%
rename from benchmarks/uc2-application/Dockerfile
rename to theodolite-benchmarks/uc2-kstreams/Dockerfile
index 5177dcede26016990b73467460fd358823c43c76..0c888bb541151da3299a86c9157ca5c6af36a088 100644
--- a/benchmarks/uc2-application/Dockerfile
+++ b/theodolite-benchmarks/uc2-kstreams/Dockerfile
@@ -1,6 +1,6 @@
 FROM openjdk:11-slim
 
-ADD build/distributions/uc2-application.tar /
+ADD build/distributions/uc2-kstreams.tar /
 
 CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
-     /uc2-application/bin/uc2-application
\ No newline at end of file
+     /uc2-kstreams/bin/uc2-kstreams
\ No newline at end of file
diff --git a/benchmarks/uc2-application/build.gradle b/theodolite-benchmarks/uc2-kstreams/build.gradle
similarity index 57%
rename from benchmarks/uc2-application/build.gradle
rename to theodolite-benchmarks/uc2-kstreams/build.gradle
index e4d3f5346e401def9c9a5a49820d0682eafb0ad3..6688f229b3c57f95aaaf5f5cd4ca615db609277a 100644
--- a/benchmarks/uc2-application/build.gradle
+++ b/theodolite-benchmarks/uc2-kstreams/build.gradle
@@ -1 +1,5 @@
+plugins {
+  id 'theodolite.kstreams'
+}
+
 mainClassName = "theodolite.uc2.application.HistoryService"
diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/application/HistoryService.java b/theodolite-benchmarks/uc2-kstreams/src/main/java/theodolite/uc2/application/HistoryService.java
similarity index 100%
rename from benchmarks/uc2-application/src/main/java/theodolite/uc2/application/HistoryService.java
rename to theodolite-benchmarks/uc2-kstreams/src/main/java/theodolite/uc2/application/HistoryService.java
diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java b/theodolite-benchmarks/uc2-kstreams/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
similarity index 100%
rename from benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
rename to theodolite-benchmarks/uc2-kstreams/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java b/theodolite-benchmarks/uc2-kstreams/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
similarity index 100%
rename from benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
rename to theodolite-benchmarks/uc2-kstreams/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/util/StatsFactory.java b/theodolite-benchmarks/uc2-kstreams/src/main/java/theodolite/uc2/streamprocessing/util/StatsFactory.java
similarity index 100%
rename from benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/util/StatsFactory.java
rename to theodolite-benchmarks/uc2-kstreams/src/main/java/theodolite/uc2/streamprocessing/util/StatsFactory.java
diff --git a/benchmarks/uc2-application/src/main/resources/META-INF/application.properties b/theodolite-benchmarks/uc2-kstreams/src/main/resources/META-INF/application.properties
similarity index 82%
rename from benchmarks/uc2-application/src/main/resources/META-INF/application.properties
rename to theodolite-benchmarks/uc2-kstreams/src/main/resources/META-INF/application.properties
index 15293b1387b96688401bbc48bc2d1615c7b63aba..1b59528db59653d8dc0c2a04d242a0cd39fe07da 100644
--- a/benchmarks/uc2-application/src/main/resources/META-INF/application.properties
+++ b/theodolite-benchmarks/uc2-kstreams/src/main/resources/META-INF/application.properties
@@ -6,4 +6,4 @@ kafka.input.topic=input
 kafka.output.topic=output
 kafka.window.duration.minutes=1
 
-schema.registry.url=http://localhost:8091
+schema.registry.url=http://localhost:8081
diff --git a/theodolite-benchmarks/uc2-load-generator/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/uc2-load-generator/.settings/org.eclipse.jdt.ui.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..4d01df75552c562406705858b6368ecf59d6e82f
--- /dev/null
+++ b/theodolite-benchmarks/uc2-load-generator/.settings/org.eclipse.jdt.ui.prefs
@@ -0,0 +1,128 @@
+cleanup.add_default_serial_version_id=true
+cleanup.add_generated_serial_version_id=false
+cleanup.add_missing_annotations=true
+cleanup.add_missing_deprecated_annotations=true
+cleanup.add_missing_methods=false
+cleanup.add_missing_nls_tags=false
+cleanup.add_missing_override_annotations=true
+cleanup.add_missing_override_annotations_interface_methods=true
+cleanup.add_serial_version_id=false
+cleanup.always_use_blocks=true
+cleanup.always_use_parentheses_in_expressions=false
+cleanup.always_use_this_for_non_static_field_access=true
+cleanup.always_use_this_for_non_static_method_access=true
+cleanup.convert_functional_interfaces=false
+cleanup.convert_to_enhanced_for_loop=true
+cleanup.correct_indentation=true
+cleanup.format_source_code=true
+cleanup.format_source_code_changes_only=false
+cleanup.insert_inferred_type_arguments=false
+cleanup.make_local_variable_final=true
+cleanup.make_parameters_final=true
+cleanup.make_private_fields_final=true
+cleanup.make_type_abstract_if_missing_method=false
+cleanup.make_variable_declarations_final=true
+cleanup.never_use_blocks=false
+cleanup.never_use_parentheses_in_expressions=true
+cleanup.organize_imports=true
+cleanup.qualify_static_field_accesses_with_declaring_class=false
+cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+cleanup.qualify_static_member_accesses_with_declaring_class=true
+cleanup.qualify_static_method_accesses_with_declaring_class=false
+cleanup.remove_private_constructors=true
+cleanup.remove_redundant_modifiers=false
+cleanup.remove_redundant_semicolons=true
+cleanup.remove_redundant_type_arguments=true
+cleanup.remove_trailing_whitespaces=true
+cleanup.remove_trailing_whitespaces_all=true
+cleanup.remove_trailing_whitespaces_ignore_empty=false
+cleanup.remove_unnecessary_casts=true
+cleanup.remove_unnecessary_nls_tags=true
+cleanup.remove_unused_imports=true
+cleanup.remove_unused_local_variables=false
+cleanup.remove_unused_private_fields=true
+cleanup.remove_unused_private_members=false
+cleanup.remove_unused_private_methods=true
+cleanup.remove_unused_private_types=true
+cleanup.sort_members=false
+cleanup.sort_members_all=false
+cleanup.use_anonymous_class_creation=false
+cleanup.use_blocks=true
+cleanup.use_blocks_only_for_return_and_throw=false
+cleanup.use_lambda=true
+cleanup.use_parentheses_in_expressions=true
+cleanup.use_this_for_non_static_field_access=true
+cleanup.use_this_for_non_static_field_access_only_if_necessary=false
+cleanup.use_this_for_non_static_method_access=true
+cleanup.use_this_for_non_static_method_access_only_if_necessary=false
+cleanup_profile=_CAU-SE-Style
+cleanup_settings_version=2
+eclipse.preferences.version=1
+editor_save_participant_org.eclipse.jdt.ui.postsavelistener.cleanup=true
+formatter_profile=_CAU-SE-Style
+formatter_settings_version=15
+org.eclipse.jdt.ui.ignorelowercasenames=true
+org.eclipse.jdt.ui.importorder=;
+org.eclipse.jdt.ui.ondemandthreshold=99
+org.eclipse.jdt.ui.staticondemandthreshold=99
+org.eclipse.jdt.ui.text.custom_code_templates=
+sp_cleanup.add_default_serial_version_id=true
+sp_cleanup.add_generated_serial_version_id=false
+sp_cleanup.add_missing_annotations=true
+sp_cleanup.add_missing_deprecated_annotations=true
+sp_cleanup.add_missing_methods=false
+sp_cleanup.add_missing_nls_tags=false
+sp_cleanup.add_missing_override_annotations=true
+sp_cleanup.add_missing_override_annotations_interface_methods=true
+sp_cleanup.add_serial_version_id=false
+sp_cleanup.always_use_blocks=true
+sp_cleanup.always_use_parentheses_in_expressions=false
+sp_cleanup.always_use_this_for_non_static_field_access=true
+sp_cleanup.always_use_this_for_non_static_method_access=true
+sp_cleanup.convert_functional_interfaces=false
+sp_cleanup.convert_to_enhanced_for_loop=true
+sp_cleanup.correct_indentation=true
+sp_cleanup.format_source_code=true
+sp_cleanup.format_source_code_changes_only=false
+sp_cleanup.insert_inferred_type_arguments=false
+sp_cleanup.make_local_variable_final=true
+sp_cleanup.make_parameters_final=true
+sp_cleanup.make_private_fields_final=true
+sp_cleanup.make_type_abstract_if_missing_method=false
+sp_cleanup.make_variable_declarations_final=true
+sp_cleanup.never_use_blocks=false
+sp_cleanup.never_use_parentheses_in_expressions=true
+sp_cleanup.on_save_use_additional_actions=true
+sp_cleanup.organize_imports=true
+sp_cleanup.qualify_static_field_accesses_with_declaring_class=false
+sp_cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_with_declaring_class=true
+sp_cleanup.qualify_static_method_accesses_with_declaring_class=false
+sp_cleanup.remove_private_constructors=true
+sp_cleanup.remove_redundant_modifiers=false
+sp_cleanup.remove_redundant_semicolons=true
+sp_cleanup.remove_redundant_type_arguments=true
+sp_cleanup.remove_trailing_whitespaces=true
+sp_cleanup.remove_trailing_whitespaces_all=true
+sp_cleanup.remove_trailing_whitespaces_ignore_empty=false
+sp_cleanup.remove_unnecessary_casts=true
+sp_cleanup.remove_unnecessary_nls_tags=true
+sp_cleanup.remove_unused_imports=true
+sp_cleanup.remove_unused_local_variables=false
+sp_cleanup.remove_unused_private_fields=true
+sp_cleanup.remove_unused_private_members=false
+sp_cleanup.remove_unused_private_methods=true
+sp_cleanup.remove_unused_private_types=true
+sp_cleanup.sort_members=false
+sp_cleanup.sort_members_all=false
+sp_cleanup.use_anonymous_class_creation=false
+sp_cleanup.use_blocks=true
+sp_cleanup.use_blocks_only_for_return_and_throw=false
+sp_cleanup.use_lambda=true
+sp_cleanup.use_parentheses_in_expressions=true
+sp_cleanup.use_this_for_non_static_field_access=true
+sp_cleanup.use_this_for_non_static_field_access_only_if_necessary=false
+sp_cleanup.use_this_for_non_static_method_access=true
+sp_cleanup.use_this_for_non_static_method_access_only_if_necessary=false
diff --git a/benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/uc2-load-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to theodolite-benchmarks/uc2-load-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/uc2-load-generator/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to theodolite-benchmarks/uc2-load-generator/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/theodolite-benchmarks/uc2-load-generator/Dockerfile b/theodolite-benchmarks/uc2-load-generator/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..e9ee95af7063a92a02ac2e417288ad1505a2dba6
--- /dev/null
+++ b/theodolite-benchmarks/uc2-load-generator/Dockerfile
@@ -0,0 +1,6 @@
+FROM openjdk:11-slim
+
+ADD build/distributions/uc2-load-generator.tar /
+
+CMD  JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
+     /uc2-load-generator/bin/uc2-load-generator
\ No newline at end of file
diff --git a/benchmarks/uc2-workload-generator/build.gradle b/theodolite-benchmarks/uc2-load-generator/build.gradle
similarity index 56%
rename from benchmarks/uc2-workload-generator/build.gradle
rename to theodolite-benchmarks/uc2-load-generator/build.gradle
index f2c3e5d2e73b655dffd94222ecfbc4fc31b7f722..1954fe5cbfd62d26f27c59be486a516c91892e18 100644
--- a/benchmarks/uc2-workload-generator/build.gradle
+++ b/theodolite-benchmarks/uc2-load-generator/build.gradle
@@ -1 +1,5 @@
+plugins {
+  id 'theodolite.load-generator'
+}
+
 mainClassName = "theodolite.uc2.workloadgenerator.LoadGenerator"
diff --git a/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java b/theodolite-benchmarks/uc2-load-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
rename to theodolite-benchmarks/uc2-load-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
diff --git a/theodolite-benchmarks/uc3-flink/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/uc3-flink/.settings/org.eclipse.jdt.ui.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..4d01df75552c562406705858b6368ecf59d6e82f
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/.settings/org.eclipse.jdt.ui.prefs
@@ -0,0 +1,128 @@
+cleanup.add_default_serial_version_id=true
+cleanup.add_generated_serial_version_id=false
+cleanup.add_missing_annotations=true
+cleanup.add_missing_deprecated_annotations=true
+cleanup.add_missing_methods=false
+cleanup.add_missing_nls_tags=false
+cleanup.add_missing_override_annotations=true
+cleanup.add_missing_override_annotations_interface_methods=true
+cleanup.add_serial_version_id=false
+cleanup.always_use_blocks=true
+cleanup.always_use_parentheses_in_expressions=false
+cleanup.always_use_this_for_non_static_field_access=true
+cleanup.always_use_this_for_non_static_method_access=true
+cleanup.convert_functional_interfaces=false
+cleanup.convert_to_enhanced_for_loop=true
+cleanup.correct_indentation=true
+cleanup.format_source_code=true
+cleanup.format_source_code_changes_only=false
+cleanup.insert_inferred_type_arguments=false
+cleanup.make_local_variable_final=true
+cleanup.make_parameters_final=true
+cleanup.make_private_fields_final=true
+cleanup.make_type_abstract_if_missing_method=false
+cleanup.make_variable_declarations_final=true
+cleanup.never_use_blocks=false
+cleanup.never_use_parentheses_in_expressions=true
+cleanup.organize_imports=true
+cleanup.qualify_static_field_accesses_with_declaring_class=false
+cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+cleanup.qualify_static_member_accesses_with_declaring_class=true
+cleanup.qualify_static_method_accesses_with_declaring_class=false
+cleanup.remove_private_constructors=true
+cleanup.remove_redundant_modifiers=false
+cleanup.remove_redundant_semicolons=true
+cleanup.remove_redundant_type_arguments=true
+cleanup.remove_trailing_whitespaces=true
+cleanup.remove_trailing_whitespaces_all=true
+cleanup.remove_trailing_whitespaces_ignore_empty=false
+cleanup.remove_unnecessary_casts=true
+cleanup.remove_unnecessary_nls_tags=true
+cleanup.remove_unused_imports=true
+cleanup.remove_unused_local_variables=false
+cleanup.remove_unused_private_fields=true
+cleanup.remove_unused_private_members=false
+cleanup.remove_unused_private_methods=true
+cleanup.remove_unused_private_types=true
+cleanup.sort_members=false
+cleanup.sort_members_all=false
+cleanup.use_anonymous_class_creation=false
+cleanup.use_blocks=true
+cleanup.use_blocks_only_for_return_and_throw=false
+cleanup.use_lambda=true
+cleanup.use_parentheses_in_expressions=true
+cleanup.use_this_for_non_static_field_access=true
+cleanup.use_this_for_non_static_field_access_only_if_necessary=false
+cleanup.use_this_for_non_static_method_access=true
+cleanup.use_this_for_non_static_method_access_only_if_necessary=false
+cleanup_profile=_CAU-SE-Style
+cleanup_settings_version=2
+eclipse.preferences.version=1
+editor_save_participant_org.eclipse.jdt.ui.postsavelistener.cleanup=true
+formatter_profile=_CAU-SE-Style
+formatter_settings_version=15
+org.eclipse.jdt.ui.ignorelowercasenames=true
+org.eclipse.jdt.ui.importorder=;
+org.eclipse.jdt.ui.ondemandthreshold=99
+org.eclipse.jdt.ui.staticondemandthreshold=99
+org.eclipse.jdt.ui.text.custom_code_templates=
+sp_cleanup.add_default_serial_version_id=true
+sp_cleanup.add_generated_serial_version_id=false
+sp_cleanup.add_missing_annotations=true
+sp_cleanup.add_missing_deprecated_annotations=true
+sp_cleanup.add_missing_methods=false
+sp_cleanup.add_missing_nls_tags=false
+sp_cleanup.add_missing_override_annotations=true
+sp_cleanup.add_missing_override_annotations_interface_methods=true
+sp_cleanup.add_serial_version_id=false
+sp_cleanup.always_use_blocks=true
+sp_cleanup.always_use_parentheses_in_expressions=false
+sp_cleanup.always_use_this_for_non_static_field_access=true
+sp_cleanup.always_use_this_for_non_static_method_access=true
+sp_cleanup.convert_functional_interfaces=false
+sp_cleanup.convert_to_enhanced_for_loop=true
+sp_cleanup.correct_indentation=true
+sp_cleanup.format_source_code=true
+sp_cleanup.format_source_code_changes_only=false
+sp_cleanup.insert_inferred_type_arguments=false
+sp_cleanup.make_local_variable_final=true
+sp_cleanup.make_parameters_final=true
+sp_cleanup.make_private_fields_final=true
+sp_cleanup.make_type_abstract_if_missing_method=false
+sp_cleanup.make_variable_declarations_final=true
+sp_cleanup.never_use_blocks=false
+sp_cleanup.never_use_parentheses_in_expressions=true
+sp_cleanup.on_save_use_additional_actions=true
+sp_cleanup.organize_imports=true
+sp_cleanup.qualify_static_field_accesses_with_declaring_class=false
+sp_cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_with_declaring_class=true
+sp_cleanup.qualify_static_method_accesses_with_declaring_class=false
+sp_cleanup.remove_private_constructors=true
+sp_cleanup.remove_redundant_modifiers=false
+sp_cleanup.remove_redundant_semicolons=true
+sp_cleanup.remove_redundant_type_arguments=true
+sp_cleanup.remove_trailing_whitespaces=true
+sp_cleanup.remove_trailing_whitespaces_all=true
+sp_cleanup.remove_trailing_whitespaces_ignore_empty=false
+sp_cleanup.remove_unnecessary_casts=true
+sp_cleanup.remove_unnecessary_nls_tags=true
+sp_cleanup.remove_unused_imports=true
+sp_cleanup.remove_unused_local_variables=false
+sp_cleanup.remove_unused_private_fields=true
+sp_cleanup.remove_unused_private_members=false
+sp_cleanup.remove_unused_private_methods=true
+sp_cleanup.remove_unused_private_types=true
+sp_cleanup.sort_members=false
+sp_cleanup.sort_members_all=false
+sp_cleanup.use_anonymous_class_creation=false
+sp_cleanup.use_blocks=true
+sp_cleanup.use_blocks_only_for_return_and_throw=false
+sp_cleanup.use_lambda=true
+sp_cleanup.use_parentheses_in_expressions=true
+sp_cleanup.use_this_for_non_static_field_access=true
+sp_cleanup.use_this_for_non_static_field_access_only_if_necessary=false
+sp_cleanup.use_this_for_non_static_method_access=true
+sp_cleanup.use_this_for_non_static_method_access_only_if_necessary=false
diff --git a/benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/uc3-flink/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to theodolite-benchmarks/uc3-flink/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/uc3-flink/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
rename to theodolite-benchmarks/uc3-flink/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/theodolite-benchmarks/uc3-flink/Dockerfile b/theodolite-benchmarks/uc3-flink/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..34c6da692cb30b738adf47b9d4ca893e72f330e4
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/Dockerfile
@@ -0,0 +1,3 @@
+FROM flink:1.12-scala_2.12-java11
+
+ADD build/libs/uc3-flink-all.jar /opt/flink/usrlib/artifacts/uc3-flink-all.jar
\ No newline at end of file
diff --git a/theodolite-benchmarks/uc3-flink/build.gradle b/theodolite-benchmarks/uc3-flink/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..7f74b4b6f3d75f213d2fae868775423381076641
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/build.gradle
@@ -0,0 +1,17 @@
+plugins {
+  id 'theodolite.flink'
+}
+
+allprojects {
+  repositories {
+    maven {
+      url 'https://packages.confluent.io/maven/'
+    }
+  }
+}
+
+dependencies {
+    compile('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT')
+}
+
+mainClassName = "theodolite.uc3.application.HistoryServiceFlinkJob"
diff --git a/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/ConfigurationKeys.java b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/ConfigurationKeys.java
new file mode 100644
index 0000000000000000000000000000000000000000..bc4e0b9d2d230026e9d2b6df0a11e4fb68380aed
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/ConfigurationKeys.java
@@ -0,0 +1,41 @@
+package theodolite.uc3.application;
+
+/**
+ * Keys to access configuration parameters.
+ */
+public final class ConfigurationKeys {
+
+  public static final String APPLICATION_NAME = "application.name";
+
+  public static final String APPLICATION_VERSION = "application.version";
+
+  public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
+
+  public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
+
+  public static final String KAFKA_OUTPUT_TOPIC = "kafka.output.topic";
+
+  public static final String SCHEMA_REGISTRY_URL = "schema.registry.url";
+
+  public static final String AGGREGATION_DURATION_DAYS = "aggregation.duration.days";
+
+  public static final String AGGREGATION_ADVANCE_DAYS = "aggregation.advance.days";
+
+  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
+
+  public static final String TIME_ZONE = "time.zone";
+
+  public static final String FLINK_STATE_BACKEND = "flink.state.backend";
+
+  public static final String FLINK_STATE_BACKEND_PATH = "flink.state.backend.path";
+
+  public static final String FLINK_STATE_BACKEND_MEMORY_SIZE = // NOPMD
+      "flink.state.backend.memory.size";
+
+  public static final String CHECKPOINTING = "checkpointing";
+
+  public static final String PARALLELISM = "parallelism";
+
+  private ConfigurationKeys() {}
+
+}
diff --git a/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/HistoryServiceFlinkJob.java b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/HistoryServiceFlinkJob.java
new file mode 100644
index 0000000000000000000000000000000000000000..091b25674a2a31671ca68bd2076c694da9533d77
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/HistoryServiceFlinkJob.java
@@ -0,0 +1,159 @@
+package theodolite.uc3.application;
+
+import com.google.common.math.Stats;
+import java.time.Instant;
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import org.apache.commons.configuration2.Configuration;
+import org.apache.flink.api.common.typeinfo.Types;
+import org.apache.flink.api.java.functions.KeySelector;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.runtime.state.StateBackend;
+import org.apache.flink.streaming.api.TimeCharacteristic;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows;
+import org.apache.flink.streaming.api.windowing.time.Time;
+import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
+import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
+import org.apache.kafka.common.serialization.Serdes;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import theodolite.commons.flink.KafkaConnectorFactory;
+import theodolite.commons.flink.StateBackends;
+import theodolite.commons.flink.serialization.StatsSerializer;
+import theodolite.uc3.application.util.HourOfDayKey;
+import theodolite.uc3.application.util.HourOfDayKeyFactory;
+import theodolite.uc3.application.util.HourOfDayKeySerde;
+import theodolite.uc3.application.util.StatsKeyFactory;
+import titan.ccp.common.configuration.ServiceConfigurations;
+import titan.ccp.model.records.ActivePowerRecord;
+
+/**
+ * The History microservice implemented as a Flink job.
+ */
+public final class HistoryServiceFlinkJob {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(HistoryServiceFlinkJob.class);
+
+  private final Configuration config = ServiceConfigurations.createWithDefaults();
+  private final StreamExecutionEnvironment env;
+  private final String applicationId;
+
+  /**
+   * Create a new instance of the {@link HistoryServiceFlinkJob}.
+   */
+  public HistoryServiceFlinkJob() {
+    final String applicationName = this.config.getString(ConfigurationKeys.APPLICATION_NAME);
+    final String applicationVersion = this.config.getString(ConfigurationKeys.APPLICATION_VERSION);
+    this.applicationId = applicationName + "-" + applicationVersion;
+
+    this.env = StreamExecutionEnvironment.getExecutionEnvironment();
+
+    this.configureEnv();
+
+    this.buildPipeline();
+  }
+
+  private void configureEnv() {
+    this.env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
+
+    final boolean checkpointing = this.config.getBoolean(ConfigurationKeys.CHECKPOINTING, true);
+    final int commitIntervalMs = this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS);
+    if (checkpointing) {
+      this.env.enableCheckpointing(commitIntervalMs);
+    }
+
+    // Parallelism
+    final Integer parallelism = this.config.getInteger(ConfigurationKeys.PARALLELISM, null);
+    if (parallelism != null) {
+      LOGGER.error("Set parallelism: {}.", parallelism);
+      this.env.setParallelism(parallelism);
+    }
+
+    // State Backend
+    final StateBackend stateBackend = StateBackends.fromConfiguration(this.config);
+    this.env.setStateBackend(stateBackend);
+
+    this.configureSerializers();
+  }
+
+  private void configureSerializers() {
+    this.env.getConfig().registerTypeWithKryoSerializer(HourOfDayKey.class,
+        new HourOfDayKeySerde());
+    this.env.getConfig().registerTypeWithKryoSerializer(Stats.class, new StatsSerializer());
+    for (final var entry : this.env.getConfig().getRegisteredTypesWithKryoSerializers()
+        .entrySet()) {
+      LOGGER.info("Class {} registered with serializer {}.",
+          entry.getKey().getName(),
+          entry.getValue().getSerializer().getClass().getName());
+    }
+  }
+
+  private void buildPipeline() {
+    // Configurations
+    final String kafkaBroker = this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS);
+    final String schemaRegistryUrl = this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL);
+    final String inputTopic = this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC);
+    final String outputTopic = this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC);
+    final ZoneId timeZone = ZoneId.of(this.config.getString(ConfigurationKeys.TIME_ZONE));
+    final Time aggregationDuration =
+        Time.days(this.config.getInt(ConfigurationKeys.AGGREGATION_DURATION_DAYS));
+    final Time aggregationAdvance =
+        Time.days(this.config.getInt(ConfigurationKeys.AGGREGATION_ADVANCE_DAYS));
+    final boolean checkpointing = this.config.getBoolean(ConfigurationKeys.CHECKPOINTING, true);
+
+    final KafkaConnectorFactory kafkaConnector = new KafkaConnectorFactory(
+        this.applicationId, kafkaBroker, checkpointing, schemaRegistryUrl);
+
+    // Sources and Sinks
+    final FlinkKafkaConsumer<ActivePowerRecord> kafkaSource =
+        kafkaConnector.createConsumer(inputTopic, ActivePowerRecord.class);
+    final FlinkKafkaProducer<Tuple2<String, String>> kafkaSink =
+        kafkaConnector.createProducer(outputTopic,
+            Serdes::String,
+            Serdes::String,
+            Types.TUPLE(Types.STRING, Types.STRING));
+
+    // Streaming topology
+    final StatsKeyFactory<HourOfDayKey> keyFactory = new HourOfDayKeyFactory();
+    this.env
+        .addSource(kafkaSource).name("[Kafka Consumer] Topic: " + inputTopic)
+        // .rebalance()
+        .keyBy((KeySelector<ActivePowerRecord, HourOfDayKey>) record -> {
+          final Instant instant = Instant.ofEpochMilli(record.getTimestamp());
+          final LocalDateTime dateTime = LocalDateTime.ofInstant(instant, timeZone);
+          return keyFactory.createKey(record.getIdentifier(), dateTime);
+        })
+        .window(SlidingEventTimeWindows.of(aggregationDuration, aggregationAdvance))
+        .aggregate(new StatsAggregateFunction(), new HourOfDayProcessWindowFunction())
+        .map(tuple -> {
+          final String newKey = keyFactory.getSensorId(tuple.f0);
+          final String newValue = tuple.f1.toString();
+          final int hourOfDay = tuple.f0.getHourOfDay();
+          LOGGER.info("{}|{}: {}", newKey, hourOfDay, newValue);
+          return new Tuple2<>(newKey, newValue);
+        })
+        .name("map")
+        .returns(Types.TUPLE(Types.STRING, Types.STRING))
+        .addSink(kafkaSink).name("[Kafka Producer] Topic: " + outputTopic);
+  }
+
+  /**
+   * Start running this microservice.
+   */
+  public void run() {
+    // Execution plan
+    LOGGER.info("Execution Plan: {}", this.env.getExecutionPlan());
+
+    // Execute Job
+    try {
+      this.env.execute(this.applicationId);
+    } catch (final Exception e) { // NOPMD Exception thrown by Flink
+      LOGGER.error("An error occurred while running this job.", e);
+    }
+  }
+
+  public static void main(final String[] args) {
+    new HistoryServiceFlinkJob().run();
+  }
+}
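
The window configuration above is the main difference from the UC2 job: with hypothetical settings aggregation.duration.days=3 and aggregation.advance.days=1, every record is assigned to duration/advance = 3 overlapping windows, so each emitted hour-of-day statistic covers the preceding three days and is refreshed once per day. In isolation:

import org.apache.flink.streaming.api.windowing.assigners.SlidingEventTimeWindows;
import org.apache.flink.streaming.api.windowing.time.Time;

// 3-day windows advancing by 1 day: each element lands in 3 windows
final SlidingEventTimeWindows assigner =
    SlidingEventTimeWindows.of(Time.days(3), Time.days(1));
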
diff --git a/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/HourOfDayProcessWindowFunction.java b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/HourOfDayProcessWindowFunction.java
new file mode 100644
index 0000000000000000000000000000000000000000..349c63413d0da792ad34e8ec8d94e7ff5dc06a42
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/HourOfDayProcessWindowFunction.java
@@ -0,0 +1,28 @@
+package theodolite.uc3.application;
+
+import com.google.common.math.Stats;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
+import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
+import org.apache.flink.util.Collector;
+import theodolite.uc3.application.util.HourOfDayKey;
+
+/**
+ * A {@link ProcessWindowFunction} that forwards a computed {@link Stats} object along with its
+ * associated key.
+ */
+public class HourOfDayProcessWindowFunction
+    extends ProcessWindowFunction<Stats, Tuple2<HourOfDayKey, Stats>, HourOfDayKey, TimeWindow> {
+
+  private static final long serialVersionUID = 7702216563302727315L; // NOPMD
+
+  @Override
+  public void process(final HourOfDayKey hourOfDayKey,
+      final Context context,
+      final Iterable<Stats> elements,
+      final Collector<Tuple2<HourOfDayKey, Stats>> out) {
+    final Stats stats = elements.iterator().next();
+    out.collect(new Tuple2<>(hourOfDayKey, stats));
+  }
+
+}
diff --git a/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/StatsAggregateFunction.java b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/StatsAggregateFunction.java
new file mode 100644
index 0000000000000000000000000000000000000000..4706da0a9491e0391f25cd61639c3bb565509cb1
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/StatsAggregateFunction.java
@@ -0,0 +1,38 @@
+package theodolite.uc3.application;
+
+import com.google.common.math.Stats;
+import com.google.common.math.StatsAccumulator;
+import org.apache.flink.api.common.functions.AggregateFunction;
+import theodolite.uc3.application.util.StatsFactory;
+import titan.ccp.model.records.ActivePowerRecord;
+
+/**
+ * Statistical aggregation of {@link ActivePowerRecord}s using {@link Stats}.
+ */
+public class StatsAggregateFunction implements AggregateFunction<ActivePowerRecord, Stats, Stats> {
+
+  private static final long serialVersionUID = -8873572990921515499L; // NOPMD
+
+  @Override
+  public Stats createAccumulator() {
+    return Stats.of();
+  }
+
+  @Override
+  public Stats add(final ActivePowerRecord value, final Stats accumulator) {
+    return StatsFactory.accumulate(accumulator, value.getValueInW());
+  }
+
+  @Override
+  public Stats getResult(final Stats accumulator) {
+    return accumulator;
+  }
+
+  @Override
+  public Stats merge(final Stats a, final Stats b) {
+    final StatsAccumulator statsAccumulator = new StatsAccumulator();
+    statsAccumulator.addAll(a);
+    statsAccumulator.addAll(b);
+    return statsAccumulator.snapshot();
+  }
+}
diff --git a/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/HourOfDayKey.java b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/HourOfDayKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..5def88b404f23a59955ca2de42b91c22b7b1b53d
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/HourOfDayKey.java
@@ -0,0 +1,79 @@
+package theodolite.uc3.application.util;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.Objects;
+
+/**
+ * Composed key of an hour of the day and a sensor id.
+ */
+public class HourOfDayKey {
+
+  private final int hourOfDay;
+  private final String sensorId;
+
+  public HourOfDayKey(final int hourOfDay, final String sensorId) {
+    this.hourOfDay = hourOfDay;
+    this.sensorId = sensorId;
+  }
+
+  public int getHourOfDay() {
+    return this.hourOfDay;
+  }
+
+  public String getSensorId() {
+    return this.sensorId;
+  }
+
+  @Override
+  public String toString() {
+    return this.sensorId + ";" + this.hourOfDay;
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(this.hourOfDay, this.sensorId);
+  }
+
+  @Override
+  public boolean equals(final Object obj) {
+    if (obj == this) {
+      return true;
+    }
+    if (!(obj instanceof HourOfDayKey)) {
+      return false;
+    }
+    final HourOfDayKey k = (HourOfDayKey) obj;
+    return this.hourOfDay == k.hourOfDay && this.sensorId.equals(k.sensorId);
+  }
+
+  /**
+   * Convert this {@link HourOfDayKey} into a byte array. This method is the inverse to
+   * {@code HourOfDayKey#fromByteArray()}.
+   */
+  public byte[] toByteArray() {
+    final int numBytes = (2 * Integer.SIZE + this.sensorId.length() * Character.SIZE) / Byte.SIZE;
+    final ByteBuffer buffer = ByteBuffer.allocate(numBytes).order(ByteOrder.LITTLE_ENDIAN);
+    buffer.putInt(this.hourOfDay);
+    buffer.putInt(this.sensorId.length());
+    for (final char c : this.sensorId.toCharArray()) {
+      buffer.putChar(c);
+    }
+    return buffer.array();
+  }
+
+  /**
+   * Construct a new {@link HourOfDayKey} from a byte array. This method is the inverse to
+   * {@code HourOfDayKey#toByteArray()}.
+   */
+  public static HourOfDayKey fromByteArray(final byte[] bytes) {
+    final ByteBuffer buffer = ByteBuffer.wrap(bytes).order(ByteOrder.LITTLE_ENDIAN);
+    final int hourOfDay = buffer.getInt();
+    final int strLen = buffer.getInt();
+    final char[] sensorId = new char[strLen];
+    for (int i = 0; i < strLen; i++) {
+      sensorId[i] = buffer.getChar();
+    }
+    return new HourOfDayKey(hourOfDay, new String(sensorId));
+  }
+}
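
toByteArray() and fromByteArray() are inverses of each other; a round-trip sketch:

final HourOfDayKey key = new HourOfDayKey(14, "sensor1");
final byte[] bytes = key.toByteArray(); // little-endian: hour, string length, chars
final HourOfDayKey restored = HourOfDayKey.fromByteArray(bytes);
// restored.equals(key) && restored.toString().equals("sensor1;14")
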
diff --git a/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/HourOfDayKeyFactory.java b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/HourOfDayKeyFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..bd67b2508bc91a87635c52e95b963ed908ed92bf
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/HourOfDayKeyFactory.java
@@ -0,0 +1,24 @@
+package theodolite.uc3.application.util;
+
+import java.io.Serializable;
+import java.time.LocalDateTime;
+
+/**
+ * {@link StatsKeyFactory} for {@link HourOfDayKey}.
+ */
+public class HourOfDayKeyFactory implements StatsKeyFactory<HourOfDayKey>, Serializable {
+
+  private static final long serialVersionUID = 4357668496473645043L; // NOPMD
+
+  @Override
+  public HourOfDayKey createKey(final String sensorId, final LocalDateTime dateTime) {
+    final int hourOfDay = dateTime.getHour();
+    return new HourOfDayKey(hourOfDay, sensorId);
+  }
+
+  @Override
+  public String getSensorId(final HourOfDayKey key) {
+    return key.getSensorId();
+  }
+
+}
diff --git a/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/HourOfDayKeySerde.java b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/HourOfDayKeySerde.java
new file mode 100644
index 0000000000000000000000000000000000000000..6e3ae9f754d2b1d4ab10349040f0c9e51134c4f7
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/HourOfDayKeySerde.java
@@ -0,0 +1,52 @@
+package theodolite.uc3.application.util;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+import java.io.Serializable;
+import org.apache.kafka.common.serialization.Serde;
+import titan.ccp.common.kafka.simpleserdes.BufferSerde;
+import titan.ccp.common.kafka.simpleserdes.ReadBuffer;
+import titan.ccp.common.kafka.simpleserdes.SimpleSerdes;
+import titan.ccp.common.kafka.simpleserdes.WriteBuffer;
+
+/**
+ * {@link BufferSerde} for an {@link HourOfDayKey}. Use the {@link #create()} method to create a new
+ * Kafka {@link Serde}.
+ */
+public class HourOfDayKeySerde extends Serializer<HourOfDayKey>
+    implements BufferSerde<HourOfDayKey>, Serializable {
+
+  private static final long serialVersionUID = 1262778284661945041L; // NOPMD
+
+  @Override
+  public void serialize(final WriteBuffer buffer, final HourOfDayKey data) {
+    buffer.putInt(data.getHourOfDay());
+    buffer.putString(data.getSensorId());
+  }
+
+  @Override
+  public HourOfDayKey deserialize(final ReadBuffer buffer) {
+    final int hourOfDay = buffer.getInt();
+    final String sensorId = buffer.getString();
+    return new HourOfDayKey(hourOfDay, sensorId);
+  }
+
+  public static Serde<HourOfDayKey> create() {
+    return SimpleSerdes.create(new HourOfDayKeySerde());
+  }
+
+  @Override
+  public void write(final Kryo kryo, final Output output, final HourOfDayKey object) {
+    final byte[] data = object.toByteArray();
+    output.writeInt(data.length);
+    output.writeBytes(data);
+  }
+
+  @Override
+  public HourOfDayKey read(final Kryo kryo, final Input input, final Class<HourOfDayKey> type) {
+    final int numBytes = input.readInt();
+    return HourOfDayKey.fromByteArray(input.readBytes(numBytes));
+  }
+}
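Annotation: this class serves both as a factory for a Kafka Serde and as a Kryo serializer for Flink. A hedged usage sketch (`env` is assumed to be a StreamExecutionEnvironment, mirroring the registration pattern used by the uc4 job later in this diff):

    // Kafka Serde, e.g. for reading/writing the key from/to Kafka:
    final Serde<HourOfDayKey> serde = HourOfDayKeySerde.create();

    // Kryo serializer for Flink state and network shuffles:
    env.getConfig().registerTypeWithKryoSerializer(HourOfDayKey.class, new HourOfDayKeySerde());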
diff --git a/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/HourOfDayRecordFactory.java b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/HourOfDayRecordFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..d8a42b74e5ca1cc55f9f21de62a5d8f877223e62
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/HourOfDayRecordFactory.java
@@ -0,0 +1,28 @@
+package theodolite.uc3.application.util;
+
+import com.google.common.math.Stats;
+import org.apache.kafka.streams.kstream.Windowed;
+import titan.ccp.model.records.HourOfDayActivePowerRecord;
+
+/**
+ * {@link StatsRecordFactory} to create an {@link HourOfDayActivePowerRecord}.
+ */
+public class HourOfDayRecordFactory
+    implements StatsRecordFactory<HourOfDayKey, HourOfDayActivePowerRecord> {
+
+  @Override
+  public HourOfDayActivePowerRecord create(final Windowed<HourOfDayKey> windowed,
+      final Stats stats) {
+    return new HourOfDayActivePowerRecord(
+        windowed.key().getSensorId(),
+        windowed.key().getHourOfDay(),
+        windowed.window().start(),
+        windowed.window().end(),
+        stats.count(),
+        stats.mean(),
+        stats.populationVariance(),
+        stats.min(),
+        stats.max());
+  }
+
+}
diff --git a/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/StatsFactory.java b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/StatsFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..b7880be4eb48035959251cc56273d16407bcb888
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/StatsFactory.java
@@ -0,0 +1,23 @@
+package theodolite.uc3.application.util;
+
+import com.google.common.math.Stats;
+import com.google.common.math.StatsAccumulator;
+
+/**
+ * Factory methods for working with {@link Stats}.
+ */
+public final class StatsFactory {
+
+  private StatsFactory() {}
+
+  /**
+   * Add a value to a {@link Stats} object.
+   */
+  public static Stats accumulate(final Stats stats, final double value) {
+    final StatsAccumulator statsAccumulator = new StatsAccumulator();
+    statsAccumulator.addAll(stats);
+    statsAccumulator.add(value);
+    return statsAccumulator.snapshot();
+  }
+
+}
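Annotation: since Guava's Stats is immutable, accumulate() snapshots a fresh object on every call. A small illustrative sketch:

    Stats stats = Stats.of(); // empty statistics
    stats = StatsFactory.accumulate(stats, 10.0);
    stats = StatsFactory.accumulate(stats, 20.0);
    // stats.count() == 2, stats.mean() == 15.0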
diff --git a/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/StatsKeyFactory.java b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/StatsKeyFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..fdebccaa2d116253c41492cab3443057adef7b36
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/StatsKeyFactory.java
@@ -0,0 +1,17 @@
+package theodolite.uc3.application.util;
+
+import java.time.LocalDateTime;
+
+/**
+ * Factory interface for creating a stats key from a sensor id and a {@link LocalDateTime} object
+ * and vice versa.
+ *
+ * @param <T> Type of the key
+ */
+public interface StatsKeyFactory<T> {
+
+  T createKey(String sensorId, LocalDateTime dateTime);
+
+  String getSensorId(T key);
+
+}
diff --git a/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/StatsRecordFactory.java b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/StatsRecordFactory.java
new file mode 100644
index 0000000000000000000000000000000000000000..61333c99966b1ffea608d225f17d8460eac9ada1
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/util/StatsRecordFactory.java
@@ -0,0 +1,22 @@
+package theodolite.uc3.application.util;
+
+import com.google.common.math.Stats;
+import org.apache.avro.specific.SpecificRecord;
+import org.apache.kafka.streams.kstream.Window;
+import org.apache.kafka.streams.kstream.Windowed;
+
+/**
+ * Factory interface for creating a stats Avro record from a {@link Windowed} and a {@link Stats}.
+ * The {@link Windowed} contains information about the start and end of the {@link Window} as
+ * well as the sensor id and the aggregated time unit. The {@link Stats} object contains the actual
+ * aggregation results.
+ *
+ * @param <K> Key type of the {@link Windowed}
+ * @param <R> Avro record type
+ */
+@FunctionalInterface
+public interface StatsRecordFactory<K, R extends SpecificRecord> {
+
+  R create(Windowed<K> windowed, Stats stats);
+
+}
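Annotation: because StatsRecordFactory is a @FunctionalInterface, implementations may also be given as lambdas; the HourOfDayRecordFactory above could equivalently be written as (sketch):

    final StatsRecordFactory<HourOfDayKey, HourOfDayActivePowerRecord> factory =
        (windowed, stats) -> new HourOfDayActivePowerRecord(
            windowed.key().getSensorId(), windowed.key().getHourOfDay(),
            windowed.window().start(), windowed.window().end(),
            stats.count(), stats.mean(), stats.populationVariance(),
            stats.min(), stats.max());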
diff --git a/theodolite-benchmarks/uc3-flink/src/main/resources/META-INF/application.properties b/theodolite-benchmarks/uc3-flink/src/main/resources/META-INF/application.properties
new file mode 100644
index 0000000000000000000000000000000000000000..6b6874674ce6a0abea73ea6d983c00c15deb8bb1
--- /dev/null
+++ b/theodolite-benchmarks/uc3-flink/src/main/resources/META-INF/application.properties
@@ -0,0 +1,13 @@
+application.name=theodolite-uc3-application
+application.version=0.0.1
+
+kafka.bootstrap.servers=localhost:9092
+kafka.input.topic=input
+kafka.output.topic=output
+schema.registry.url=http://localhost:8081
+aggregation.duration.days=30
+aggregation.advance.days=1
+num.threads=1
+commit.interval.ms=100
+cache.max.bytes.buffering=-1
+time.zone=Europe/Paris
\ No newline at end of file
diff --git a/benchmarks/uc3-application/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/uc3-kstreams/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from benchmarks/uc3-application/.settings/org.eclipse.jdt.ui.prefs
rename to theodolite-benchmarks/uc3-kstreams/.settings/org.eclipse.jdt.ui.prefs
diff --git a/theodolite-benchmarks/uc3-kstreams/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/uc3-kstreams/.settings/qa.eclipse.plugin.checkstyle.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..87860c815222845c1d264d7d0ce498d3397f8280
--- /dev/null
+++ b/theodolite-benchmarks/uc3-kstreams/.settings/qa.eclipse.plugin.checkstyle.prefs
@@ -0,0 +1,4 @@
+configFilePath=../config/checkstyle.xml
+customModulesJarPaths=
+eclipse.preferences.version=1
+enabled=true
diff --git a/theodolite-benchmarks/uc3-kstreams/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/uc3-kstreams/.settings/qa.eclipse.plugin.pmd.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..efbcb8c9e5d449194a48ca1ea42b7d807b573db9
--- /dev/null
+++ b/theodolite-benchmarks/uc3-kstreams/.settings/qa.eclipse.plugin.pmd.prefs
@@ -0,0 +1,4 @@
+customRulesJars=
+eclipse.preferences.version=1
+enabled=true
+ruleSetFilePath=../config/pmd.xml
diff --git a/benchmarks/uc3-application/Dockerfile b/theodolite-benchmarks/uc3-kstreams/Dockerfile
similarity index 52%
rename from benchmarks/uc3-application/Dockerfile
rename to theodolite-benchmarks/uc3-kstreams/Dockerfile
index 61141baaf752af4b596c8a04cd0d7cc2e6d740af..30d6994b0214c8ff3576a79781654b9018fdf93a 100644
--- a/benchmarks/uc3-application/Dockerfile
+++ b/theodolite-benchmarks/uc3-kstreams/Dockerfile
@@ -1,6 +1,6 @@
 FROM openjdk:11-slim
 
-ADD build/distributions/uc3-application.tar /
+ADD build/distributions/uc3-kstreams.tar /
 
 CMD  JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
-     /uc3-application/bin/uc3-application
+     /uc3-kstreams/bin/uc3-kstreams
diff --git a/benchmarks/uc3-application/build.gradle b/theodolite-benchmarks/uc3-kstreams/build.gradle
similarity index 57%
rename from benchmarks/uc3-application/build.gradle
rename to theodolite-benchmarks/uc3-kstreams/build.gradle
index aa96b6dbf90c4895dfda57a51c753c9103c29414..d588d85ae88e3efd2b687e44e9eb9561a45cd8c0 100644
--- a/benchmarks/uc3-application/build.gradle
+++ b/theodolite-benchmarks/uc3-kstreams/build.gradle
@@ -1 +1,5 @@
+plugins {
+  id 'theodolite.kstreams'
+}
+
 mainClassName = "theodolite.uc3.application.HistoryService"
diff --git a/benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java b/theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/application/HistoryService.java
similarity index 100%
rename from benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
rename to theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/application/HistoryService.java
diff --git a/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKey.java b/theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKey.java
similarity index 100%
rename from benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKey.java
rename to theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKey.java
diff --git a/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKeyFactory.java b/theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKeyFactory.java
similarity index 100%
rename from benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKeyFactory.java
rename to theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKeyFactory.java
diff --git a/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKeySerde.java b/theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKeySerde.java
similarity index 100%
rename from benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKeySerde.java
rename to theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKeySerde.java
diff --git a/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayRecordFactory.java b/theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/HourOfDayRecordFactory.java
similarity index 100%
rename from benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayRecordFactory.java
rename to theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/HourOfDayRecordFactory.java
diff --git a/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/RecordDatabaseAdapter.java b/theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/RecordDatabaseAdapter.java
similarity index 100%
rename from benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/RecordDatabaseAdapter.java
rename to theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/RecordDatabaseAdapter.java
diff --git a/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/StatsKeyFactory.java b/theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/StatsKeyFactory.java
similarity index 100%
rename from benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/StatsKeyFactory.java
rename to theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/StatsKeyFactory.java
diff --git a/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/StatsRecordFactory.java b/theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/StatsRecordFactory.java
similarity index 100%
rename from benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/StatsRecordFactory.java
rename to theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/StatsRecordFactory.java
diff --git a/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java b/theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
similarity index 100%
rename from benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
rename to theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
diff --git a/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java b/theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
similarity index 100%
rename from benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
rename to theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
diff --git a/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java b/theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java
similarity index 100%
rename from benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java
rename to theodolite-benchmarks/uc3-kstreams/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java
diff --git a/benchmarks/uc3-application/src/main/resources/META-INF/application.properties b/theodolite-benchmarks/uc3-kstreams/src/main/resources/META-INF/application.properties
similarity index 83%
rename from benchmarks/uc3-application/src/main/resources/META-INF/application.properties
rename to theodolite-benchmarks/uc3-kstreams/src/main/resources/META-INF/application.properties
index 1273441a61763325c812541e1af8c243f81a31a5..0ce745fb61f87016aee5cc242c03069924ceb58e 100644
--- a/benchmarks/uc3-application/src/main/resources/META-INF/application.properties
+++ b/theodolite-benchmarks/uc3-kstreams/src/main/resources/META-INF/application.properties
@@ -7,4 +7,4 @@ kafka.output.topic=output
 aggregation.duration.days=30
 aggregation.advance.days=1
 
-schema.registry.url=http://localhost:8091
+schema.registry.url=http://localhost:8081
diff --git a/benchmarks/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/uc3-load-generator/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from benchmarks/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to theodolite-benchmarks/uc3-load-generator/.settings/org.eclipse.jdt.ui.prefs
diff --git a/theodolite-benchmarks/uc3-load-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/uc3-load-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..87860c815222845c1d264d7d0ce498d3397f8280
--- /dev/null
+++ b/theodolite-benchmarks/uc3-load-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
@@ -0,0 +1,4 @@
+configFilePath=../config/checkstyle.xml
+customModulesJarPaths=
+eclipse.preferences.version=1
+enabled=true
diff --git a/theodolite-benchmarks/uc3-load-generator/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/uc3-load-generator/.settings/qa.eclipse.plugin.pmd.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..efbcb8c9e5d449194a48ca1ea42b7d807b573db9
--- /dev/null
+++ b/theodolite-benchmarks/uc3-load-generator/.settings/qa.eclipse.plugin.pmd.prefs
@@ -0,0 +1,4 @@
+customRulesJars=
+eclipse.preferences.version=1
+enabled=true
+ruleSetFilePath=../config/pmd.xml
diff --git a/theodolite-benchmarks/uc3-load-generator/Dockerfile b/theodolite-benchmarks/uc3-load-generator/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..a2ab45d350bc01b4f526912fbb39f6f7c730f290
--- /dev/null
+++ b/theodolite-benchmarks/uc3-load-generator/Dockerfile
@@ -0,0 +1,6 @@
+FROM openjdk:11-slim
+
+ADD build/distributions/uc3-load-generator.tar /
+
+CMD  JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
+     /uc3-load-generator/bin/uc3-load-generator
diff --git a/benchmarks/uc3-workload-generator/build.gradle b/theodolite-benchmarks/uc3-load-generator/build.gradle
similarity index 56%
rename from benchmarks/uc3-workload-generator/build.gradle
rename to theodolite-benchmarks/uc3-load-generator/build.gradle
index c3ca94290c8600d8482210362666efc1249b8f02..fddc53ae8273a44d178e8828a38a503196af9208 100644
--- a/benchmarks/uc3-workload-generator/build.gradle
+++ b/theodolite-benchmarks/uc3-load-generator/build.gradle
@@ -1 +1,5 @@
+plugins {
+  id 'theodolite.load-generator'
+}
+
 mainClassName = "theodolite.uc3.workloadgenerator.LoadGenerator"
diff --git a/benchmarks/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java b/theodolite-benchmarks/uc3-load-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from benchmarks/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
rename to theodolite-benchmarks/uc3-load-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
diff --git a/theodolite-benchmarks/uc4-flink/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/uc4-flink/.settings/org.eclipse.jdt.ui.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..272e01533f6a345d53d2635c47e38c6d3c33dc8a
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/.settings/org.eclipse.jdt.ui.prefs
@@ -0,0 +1,128 @@
+cleanup.add_default_serial_version_id=true
+cleanup.add_generated_serial_version_id=false
+cleanup.add_missing_annotations=true
+cleanup.add_missing_deprecated_annotations=true
+cleanup.add_missing_methods=false
+cleanup.add_missing_nls_tags=false
+cleanup.add_missing_override_annotations=true
+cleanup.add_missing_override_annotations_interface_methods=true
+cleanup.add_serial_version_id=false
+cleanup.always_use_blocks=true
+cleanup.always_use_parentheses_in_expressions=false
+cleanup.always_use_this_for_non_static_field_access=true
+cleanup.always_use_this_for_non_static_method_access=true
+cleanup.convert_functional_interfaces=false
+cleanup.convert_to_enhanced_for_loop=true
+cleanup.correct_indentation=true
+cleanup.format_source_code=true
+cleanup.format_source_code_changes_only=false
+cleanup.insert_inferred_type_arguments=false
+cleanup.make_local_variable_final=true
+cleanup.make_parameters_final=true
+cleanup.make_private_fields_final=true
+cleanup.make_type_abstract_if_missing_method=false
+cleanup.make_variable_declarations_final=true
+cleanup.never_use_blocks=false
+cleanup.never_use_parentheses_in_expressions=true
+cleanup.organize_imports=true
+cleanup.qualify_static_field_accesses_with_declaring_class=false
+cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+cleanup.qualify_static_member_accesses_with_declaring_class=true
+cleanup.qualify_static_method_accesses_with_declaring_class=false
+cleanup.remove_private_constructors=true
+cleanup.remove_redundant_modifiers=false
+cleanup.remove_redundant_semicolons=true
+cleanup.remove_redundant_type_arguments=true
+cleanup.remove_trailing_whitespaces=true
+cleanup.remove_trailing_whitespaces_all=true
+cleanup.remove_trailing_whitespaces_ignore_empty=false
+cleanup.remove_unnecessary_casts=true
+cleanup.remove_unnecessary_nls_tags=true
+cleanup.remove_unused_imports=true
+cleanup.remove_unused_local_variables=false
+cleanup.remove_unused_private_fields=true
+cleanup.remove_unused_private_members=false
+cleanup.remove_unused_private_methods=true
+cleanup.remove_unused_private_types=true
+cleanup.sort_members=false
+cleanup.sort_members_all=false
+cleanup.use_anonymous_class_creation=false
+cleanup.use_blocks=true
+cleanup.use_blocks_only_for_return_and_throw=false
+cleanup.use_lambda=true
+cleanup.use_parentheses_in_expressions=true
+cleanup.use_this_for_non_static_field_access=true
+cleanup.use_this_for_non_static_field_access_only_if_necessary=false
+cleanup.use_this_for_non_static_method_access=true
+cleanup.use_this_for_non_static_method_access_only_if_necessary=false
+cleanup_profile=_CAU-SE-Style
+cleanup_settings_version=2
+eclipse.preferences.version=1
+editor_save_participant_org.eclipse.jdt.ui.postsavelistener.cleanup=true
+formatter_profile=_CAU-SE-Style
+formatter_settings_version=15
+org.eclipse.jdt.ui.ignorelowercasenames=true
+org.eclipse.jdt.ui.importorder=;
+org.eclipse.jdt.ui.ondemandthreshold=99
+org.eclipse.jdt.ui.staticondemandthreshold=99
+org.eclipse.jdt.ui.text.custom_code_templates=
+sp_cleanup.add_default_serial_version_id=true
+sp_cleanup.add_generated_serial_version_id=false
+sp_cleanup.add_missing_annotations=true
+sp_cleanup.add_missing_deprecated_annotations=true
+sp_cleanup.add_missing_methods=false
+sp_cleanup.add_missing_nls_tags=false
+sp_cleanup.add_missing_override_annotations=true
+sp_cleanup.add_missing_override_annotations_interface_methods=true
+sp_cleanup.add_serial_version_id=false
+sp_cleanup.always_use_blocks=true
+sp_cleanup.always_use_parentheses_in_expressions=false
+sp_cleanup.always_use_this_for_non_static_field_access=true
+sp_cleanup.always_use_this_for_non_static_method_access=true
+sp_cleanup.convert_functional_interfaces=false
+sp_cleanup.convert_to_enhanced_for_loop=true
+sp_cleanup.correct_indentation=true
+sp_cleanup.format_source_code=true
+sp_cleanup.format_source_code_changes_only=false
+sp_cleanup.insert_inferred_type_arguments=false
+sp_cleanup.make_local_variable_final=true
+sp_cleanup.make_parameters_final=true
+sp_cleanup.make_private_fields_final=true
+sp_cleanup.make_type_abstract_if_missing_method=false
+sp_cleanup.make_variable_declarations_final=true
+sp_cleanup.never_use_blocks=false
+sp_cleanup.never_use_parentheses_in_expressions=true
+sp_cleanup.on_save_use_additional_actions=true
+sp_cleanup.organize_imports=true
+sp_cleanup.qualify_static_field_accesses_with_declaring_class=false
+sp_cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_with_declaring_class=true
+sp_cleanup.qualify_static_method_accesses_with_declaring_class=false
+sp_cleanup.remove_private_constructors=true
+sp_cleanup.remove_redundant_modifiers=true
+sp_cleanup.remove_redundant_semicolons=true
+sp_cleanup.remove_redundant_type_arguments=true
+sp_cleanup.remove_trailing_whitespaces=true
+sp_cleanup.remove_trailing_whitespaces_all=true
+sp_cleanup.remove_trailing_whitespaces_ignore_empty=false
+sp_cleanup.remove_unnecessary_casts=true
+sp_cleanup.remove_unnecessary_nls_tags=true
+sp_cleanup.remove_unused_imports=true
+sp_cleanup.remove_unused_local_variables=false
+sp_cleanup.remove_unused_private_fields=true
+sp_cleanup.remove_unused_private_members=false
+sp_cleanup.remove_unused_private_methods=true
+sp_cleanup.remove_unused_private_types=true
+sp_cleanup.sort_members=false
+sp_cleanup.sort_members_all=false
+sp_cleanup.use_anonymous_class_creation=false
+sp_cleanup.use_blocks=true
+sp_cleanup.use_blocks_only_for_return_and_throw=false
+sp_cleanup.use_lambda=true
+sp_cleanup.use_parentheses_in_expressions=true
+sp_cleanup.use_this_for_non_static_field_access=true
+sp_cleanup.use_this_for_non_static_field_access_only_if_necessary=false
+sp_cleanup.use_this_for_non_static_method_access=true
+sp_cleanup.use_this_for_non_static_method_access_only_if_necessary=false
diff --git a/theodolite-benchmarks/uc4-flink/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/uc4-flink/.settings/qa.eclipse.plugin.checkstyle.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..87860c815222845c1d264d7d0ce498d3397f8280
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/.settings/qa.eclipse.plugin.checkstyle.prefs
@@ -0,0 +1,4 @@
+configFilePath=../config/checkstyle.xml
+customModulesJarPaths=
+eclipse.preferences.version=1
+enabled=true
diff --git a/theodolite-benchmarks/uc4-flink/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/uc4-flink/.settings/qa.eclipse.plugin.pmd.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..efbcb8c9e5d449194a48ca1ea42b7d807b573db9
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/.settings/qa.eclipse.plugin.pmd.prefs
@@ -0,0 +1,4 @@
+customRulesJars=
+eclipse.preferences.version=1
+enabled=true
+ruleSetFilePath=../config/pmd.xml
diff --git a/theodolite-benchmarks/uc4-flink/Dockerfile b/theodolite-benchmarks/uc4-flink/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..4f51f379e5da436104bb8c914e3233d6ecb4ec1f
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/Dockerfile
@@ -0,0 +1,3 @@
+FROM flink:1.12-scala_2.12-java11
+
+ADD build/libs/uc4-flink-all.jar /opt/flink/usrlib/artifacts/uc4-flink-all.jar
\ No newline at end of file
diff --git a/theodolite-benchmarks/uc4-flink/build.gradle b/theodolite-benchmarks/uc4-flink/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..ebc7ca5f30a668fd161bb22f95133452b5061441
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/build.gradle
@@ -0,0 +1,5 @@
+plugins {
+  id 'theodolite.flink'
+}
+
+mainClassName = "theodolite.uc4.application.AggregationServiceFlinkJob"
diff --git a/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/AggregationServiceFlinkJob.java b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/AggregationServiceFlinkJob.java
new file mode 100644
index 0000000000000000000000000000000000000000..3e2878a893057024de00333492462f5029eb6d77
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/AggregationServiceFlinkJob.java
@@ -0,0 +1,224 @@
+package theodolite.uc4.application; // NOPMD Imports required
+
+import java.time.Duration;
+import java.util.Set;
+import org.apache.commons.configuration2.Configuration;
+import org.apache.flink.api.common.eventtime.WatermarkStrategy;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.common.typeinfo.Types;
+import org.apache.flink.api.java.functions.KeySelector;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.runtime.state.StateBackend;
+import org.apache.flink.streaming.api.TimeCharacteristic;
+import org.apache.flink.streaming.api.datastream.DataStream;
+import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
+import org.apache.flink.streaming.api.windowing.assigners.TumblingEventTimeWindows;
+import org.apache.flink.streaming.api.windowing.time.Time;
+import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
+import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
+import org.apache.kafka.common.serialization.Serdes;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import theodolite.commons.flink.KafkaConnectorFactory;
+import theodolite.commons.flink.StateBackends;
+import theodolite.commons.flink.TupleType;
+import theodolite.uc4.application.util.ImmutableSensorRegistrySerializer;
+import theodolite.uc4.application.util.ImmutableSetSerializer;
+import theodolite.uc4.application.util.SensorParentKey;
+import theodolite.uc4.application.util.SensorParentKeySerializer;
+import titan.ccp.common.configuration.ServiceConfigurations;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
+import titan.ccp.configuration.events.Event;
+import titan.ccp.configuration.events.EventSerde;
+import titan.ccp.model.records.ActivePowerRecord;
+import titan.ccp.model.records.AggregatedActivePowerRecord;
+import titan.ccp.model.sensorregistry.ImmutableSensorRegistry;
+import titan.ccp.model.sensorregistry.SensorRegistry;
+
+/**
+ * The Aggregation microservice implemented as a Flink job.
+ */
+public final class AggregationServiceFlinkJob {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(AggregationServiceFlinkJob.class);
+
+  private final Configuration config = ServiceConfigurations.createWithDefaults();
+  private final StreamExecutionEnvironment env;
+  private final String applicationId;
+
+  /**
+   * Create a new {@link AggregationServiceFlinkJob}.
+   */
+  public AggregationServiceFlinkJob() {
+    final String applicationName = this.config.getString(ConfigurationKeys.APPLICATION_NAME);
+    final String applicationVersion = this.config.getString(ConfigurationKeys.APPLICATION_VERSION);
+    this.applicationId = applicationName + "-" + applicationVersion;
+
+    // Execution environment configuration
+    // org.apache.flink.configuration.Configuration conf = new
+    // org.apache.flink.configuration.Configuration();
+    // conf.setBoolean(ConfigConstants.LOCAL_START_WEBSERVER, true);
+    // final StreamExecutionEnvironment env =
+    // StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(conf);
+    this.env = StreamExecutionEnvironment.getExecutionEnvironment();
+
+    this.configureEnv();
+
+    this.buildPipeline();
+  }
+
+  private void configureEnv() {
+    this.env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
+
+    final boolean checkpointing = this.config.getBoolean(ConfigurationKeys.CHECKPOINTING, true);
+    final int commitIntervalMs = this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS);
+    if (checkpointing) {
+      this.env.enableCheckpointing(commitIntervalMs);
+    }
+
+    // Parallelism
+    final Integer parallelism = this.config.getInteger(ConfigurationKeys.PARALLELISM, null);
+    if (parallelism != null) {
+      LOGGER.info("Set parallelism: {}.", parallelism);
+      this.env.setParallelism(parallelism);
+    }
+
+    // State Backend
+    final StateBackend stateBackend = StateBackends.fromConfiguration(this.config);
+    this.env.setStateBackend(stateBackend);
+
+    this.configureSerializers();
+  }
+
+  private void configureSerializers() {
+    this.env.getConfig().registerTypeWithKryoSerializer(ImmutableSensorRegistry.class,
+        new ImmutableSensorRegistrySerializer());
+    this.env.getConfig().registerTypeWithKryoSerializer(SensorParentKey.class,
+        new SensorParentKeySerializer());
+
+    this.env.getConfig().registerTypeWithKryoSerializer(Set.of().getClass(),
+        new ImmutableSetSerializer());
+    this.env.getConfig().registerTypeWithKryoSerializer(Set.of(1).getClass(),
+        new ImmutableSetSerializer());
+    this.env.getConfig().registerTypeWithKryoSerializer(Set.of(1, 2, 3, 4).getClass(), // NOCS
+        new ImmutableSetSerializer());
+
+    this.env.getConfig().getRegisteredTypesWithKryoSerializers()
+        .forEach((c, s) -> LOGGER.info("Class " + c.getName() + " registered with serializer "
+            + s.getSerializer().getClass().getName()));
+  }
+
+  private void buildPipeline() {
+    // Get configurations
+    final String kafkaBroker = this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS);
+    final String schemaRegistryUrl = this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL);
+    final String inputTopic = this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC);
+    final String outputTopic = this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC);
+    final Time windowSize =
+        Time.milliseconds(this.config.getLong(ConfigurationKeys.WINDOW_SIZE_MS));
+    final Duration windowGrace =
+        Duration.ofMillis(this.config.getLong(ConfigurationKeys.WINDOW_GRACE_MS));
+    final String configurationTopic =
+        this.config.getString(ConfigurationKeys.CONFIGURATION_KAFKA_TOPIC);
+    final boolean checkpointing = this.config.getBoolean(ConfigurationKeys.CHECKPOINTING, true);
+
+    final KafkaConnectorFactory kafkaConnector = new KafkaConnectorFactory(
+        this.applicationId, kafkaBroker, checkpointing, schemaRegistryUrl);
+
+    // Source from input topic with ActivePowerRecords
+    final FlinkKafkaConsumer<ActivePowerRecord> kafkaInputSource =
+        kafkaConnector.createConsumer(inputTopic, ActivePowerRecord.class);
+    // TODO Watermarks?
+
+    // Source from output topic with AggregatedActivePowerRecords
+    final FlinkKafkaConsumer<AggregatedActivePowerRecord> kafkaOutputSource =
+        kafkaConnector.createConsumer(outputTopic, AggregatedActivePowerRecord.class);
+
+    final FlinkKafkaConsumer<Tuple2<Event, String>> kafkaConfigSource =
+        kafkaConnector.createConsumer(
+            configurationTopic,
+            EventSerde::serde,
+            Serdes::String,
+            TupleType.of(TypeInformation.of(Event.class), Types.STRING));
+
+    // Sink to output topic with SensorId, AggregatedActivePowerRecord
+    final FlinkKafkaProducer<Tuple2<String, AggregatedActivePowerRecord>> kafkaAggregationSink =
+        kafkaConnector.createProducer(
+            outputTopic,
+            Serdes::String,
+            () -> new SchemaRegistryAvroSerdeFactory(schemaRegistryUrl).forValues(),
+            Types.TUPLE(Types.STRING, TypeInformation.of(AggregatedActivePowerRecord.class)));
+
+    // Build input stream
+    final DataStream<ActivePowerRecord> inputStream = this.env.addSource(kafkaInputSource)
+        .name("[Kafka Consumer] Topic: " + inputTopic)// NOCS
+        // .rebalance()
+        .map(r -> r)
+        .name("[Map] Rebalance Forward");
+
+    // Build aggregation stream
+    final DataStream<ActivePowerRecord> aggregationsInputStream =
+        this.env.addSource(kafkaOutputSource)
+            .name("[Kafka Consumer] Topic: " + outputTopic) // NOCS
+            // .rebalance()
+            .map(r -> new ActivePowerRecord(r.getIdentifier(), r.getTimestamp(), r.getSumInW()))
+            .name("[Map] AggregatedActivePowerRecord -> ActivePowerRecord");
+
+    // Merge input and aggregation streams
+    final DataStream<ActivePowerRecord> mergedInputStream = inputStream
+        .union(aggregationsInputStream);
+
+    // Build parent sensor stream from configuration stream
+    final DataStream<Tuple2<String, Set<String>>> configurationsStream =
+        this.env.addSource(kafkaConfigSource)
+            .name("[Kafka Consumer] Topic: " + configurationTopic) // NOCS
+            .filter(tuple -> tuple.f0 == Event.SENSOR_REGISTRY_CHANGED
+                || tuple.f0 == Event.SENSOR_REGISTRY_STATUS)
+            .name("[Filter] SensorRegistry changed")
+            .map(tuple -> SensorRegistry.fromJson(tuple.f1)).name("[Map] JSON -> SensorRegistry")
+            .keyBy(sr -> 1)
+            .flatMap(new ChildParentsFlatMapFunction())
+            .name("[FlatMap] SensorRegistry -> (ChildSensor, ParentSensor[])");
+
+    final DataStream<Tuple2<SensorParentKey, ActivePowerRecord>> lastValueStream =
+        mergedInputStream.connect(configurationsStream)
+            .keyBy(ActivePowerRecord::getIdentifier,
+                (KeySelector<Tuple2<String, Set<String>>, String>) t -> t.f0)
+            .flatMap(new JoinAndDuplicateCoFlatMapFunction())
+            .name("[CoFlatMap] Join input-config, Flatten to ((Sensor, Group), ActivePowerRecord)");
+
+    final DataStream<AggregatedActivePowerRecord> aggregationStream = lastValueStream
+        .rebalance()
+        .assignTimestampsAndWatermarks(WatermarkStrategy.forBoundedOutOfOrderness(windowGrace))
+        .keyBy(t -> t.f0.getParent())
+        .window(TumblingEventTimeWindows.of(windowSize))
+        .process(new RecordAggregationProcessWindowFunction())
+        .name("[Aggregate] ((Sensor, Group), ActivePowerRecord) -> AggregatedActivePowerRecord");
+
+    // add Kafka Sink
+    aggregationStream
+        .map(value -> new Tuple2<>(value.getIdentifier(), value))
+        .name("[Map] AggregatedActivePowerRecord -> (Sensor, AggregatedActivePowerRecord)")
+        .returns(Types.TUPLE(Types.STRING, TypeInformation.of(AggregatedActivePowerRecord.class)))
+        .addSink(kafkaAggregationSink).name("[Kafka Producer] Topic: " + outputTopic);
+  }
+
+  /**
+   * Start running this microservice.
+   */
+  public void run() {
+    // Execution plan
+    LOGGER.info("Execution plan: {}", this.env.getExecutionPlan());
+
+    // Execute Job
+    try {
+      this.env.execute(this.applicationId);
+    } catch (final Exception e) { // NOPMD Exception thrown by Flink
+      LOGGER.error("An error occurred while running this job.", e);
+    }
+  }
+
+  public static void main(final String[] args) {
+    new AggregationServiceFlinkJob().run();
+  }
+}
diff --git a/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/ChildParentsFlatMapFunction.java b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/ChildParentsFlatMapFunction.java
new file mode 100644
index 0000000000000000000000000000000000000000..910dc359fa9b5b0810f7f9b6e67bfceaa68cc798
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/ChildParentsFlatMapFunction.java
@@ -0,0 +1,102 @@
+package theodolite.uc4.application;
+
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.apache.flink.api.common.functions.RichFlatMapFunction;
+import org.apache.flink.api.common.state.MapState;
+import org.apache.flink.api.common.state.MapStateDescriptor;
+import org.apache.flink.api.common.typeinfo.TypeHint;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.util.Collector;
+import titan.ccp.model.sensorregistry.AggregatedSensor;
+import titan.ccp.model.sensorregistry.Sensor;
+import titan.ccp.model.sensorregistry.SensorRegistry;
+
+/**
+ * Transforms a {@link SensorRegistry} into key-value pairs of sensor identifiers and their parents'
+ * sensor identifiers. All pairs whose sensor's parents have changed since the last iteration are
+ * forwarded. A mapping of an identifier to <code>null</code> means that the corresponding sensor
+ * no longer exists in the sensor registry.
+ */
+public class ChildParentsFlatMapFunction
+    extends RichFlatMapFunction<SensorRegistry, Tuple2<String, Set<String>>> {
+
+  private static final long serialVersionUID = 3969444219510915221L; // NOPMD
+
+  private transient MapState<String, Set<String>> state;
+
+  @Override
+  public void open(final Configuration parameters) {
+    final MapStateDescriptor<String, Set<String>> descriptor =
+        new MapStateDescriptor<>(
+            "child-parents-state",
+            TypeInformation.of(new TypeHint<String>() {}),
+            TypeInformation.of(new TypeHint<Set<String>>() {}));
+    this.state = this.getRuntimeContext().getMapState(descriptor);
+  }
+
+  @Override
+  public void flatMap(final SensorRegistry value, final Collector<Tuple2<String, Set<String>>> out)
+      throws Exception {
+    final Map<String, Set<String>> childParentsPairs = this.constructChildParentsPairs(value);
+    this.updateChildParentsPairs(childParentsPairs);
+    this.updateState(childParentsPairs);
+    childParentsPairs
+        .entrySet()
+        .stream()
+        .map(e -> new Tuple2<>(e.getKey(), e.getValue()))
+        .forEach(out::collect);
+  }
+
+  private Map<String, Set<String>> constructChildParentsPairs(final SensorRegistry registry) {
+    return this.streamAllChildren(registry.getTopLevelSensor())
+        .collect(Collectors.toMap(
+            Sensor::getIdentifier,
+            child -> child.getParent()
+                .map(p -> Set.of(p.getIdentifier()))
+                .orElseGet(Set::of)));
+  }
+
+  private Stream<Sensor> streamAllChildren(final AggregatedSensor sensor) {
+    return sensor.getChildren().stream()
+        .flatMap(s -> Stream.concat(
+            Stream.of(s),
+            s instanceof AggregatedSensor ? this.streamAllChildren((AggregatedSensor) s)
+                : Stream.empty()));
+  }
+
+  private void updateChildParentsPairs(final Map<String, Set<String>> childParentsPairs)
+      throws Exception { // NOPMD General exception thrown by Flink
+    final Iterator<Map.Entry<String, Set<String>>> oldChildParentsPairs = this.state.iterator();
+    while (oldChildParentsPairs.hasNext()) {
+      final Map.Entry<String, Set<String>> oldChildParentPair = oldChildParentsPairs.next();
+      final String identifier = oldChildParentPair.getKey();
+      final Set<String> oldParents = oldChildParentPair.getValue();
+      final Set<String> newParents = childParentsPairs.get(identifier); // null if not exists
+      if (newParents == null) {
+        // Sensor was deleted
+        childParentsPairs.put(identifier, null);
+      } else if (newParents.equals(oldParents)) {
+        // No changes
+        childParentsPairs.remove(identifier);
+      }
+      // Else: parents changed -> keep the pair so it is forwarded (perhaps mark changed parents
+      // later)
+    }
+  }
+
+  private void updateState(final Map<String, Set<String>> childParentsPairs)
+      throws Exception { // NOPMD General exception thrown by Flink
+    for (final Map.Entry<String, Set<String>> childParentPair : childParentsPairs.entrySet()) {
+      if (childParentPair.getValue() == null) {
+        this.state.remove(childParentPair.getKey());
+      } else {
+        this.state.put(childParentPair.getKey(), childParentPair.getValue());
+      }
+    }
+  }
+}
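Annotation: a short worked example of the semantics above (sensor names are hypothetical). If a previous registry mapped sensor1 -> {groupA} and a new registry moves sensor1 under groupB and removes sensor2, the function emits (sensor1, {groupB}) and (sensor2, null), while pairs for sensors whose parents are unchanged are suppressed.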
diff --git a/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/ConfigurationKeys.java b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/ConfigurationKeys.java
new file mode 100644
index 0000000000000000000000000000000000000000..448e8b095ef15c434655ca3c76a9e2de21244054
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/ConfigurationKeys.java
@@ -0,0 +1,42 @@
+package theodolite.uc4.application;
+
+/**
+ * Keys to access configuration parameters.
+ */
+public final class ConfigurationKeys {
+  public static final String APPLICATION_NAME = "application.name";
+
+  public static final String APPLICATION_VERSION = "application.version";
+
+  public static final String CONFIGURATION_KAFKA_TOPIC = "configuration.kafka.topic";
+
+  public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
+
+  public static final String KAFKA_OUTPUT_TOPIC = "kafka.output.topic";
+
+  public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
+
+  public static final String SCHEMA_REGISTRY_URL = "schema.registry.url";
+
+  public static final String WINDOW_SIZE_MS = "window.size.ms";
+
+  public static final String WINDOW_GRACE_MS = "window.grace.ms";
+
+  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
+
+  public static final String FLINK_STATE_BACKEND = "flink.state.backend";
+
+  public static final String FLINK_STATE_BACKEND_PATH = "flink.state.backend.path";
+
+  public static final String FLINK_STATE_BACKEND_MEMORY_SIZE = // NOPMD
+      "flink.state.backend.memory.size";
+
+  public static final String DEBUG = "debug";
+
+  public static final String CHECKPOINTING = "checkpointing";
+
+  public static final String PARALLELISM = "parallelism";
+
+  private ConfigurationKeys() {}
+
+}
diff --git a/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/JoinAndDuplicateCoFlatMapFunction.java b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/JoinAndDuplicateCoFlatMapFunction.java
new file mode 100644
index 0000000000000000000000000000000000000000..6ef9a72e9695cfccba0bbcca1238f7ebc94fc505
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/JoinAndDuplicateCoFlatMapFunction.java
@@ -0,0 +1,66 @@
+package theodolite.uc4.application;
+
+import java.util.Set;
+import org.apache.flink.api.common.state.MapState;
+import org.apache.flink.api.common.state.MapStateDescriptor;
+import org.apache.flink.api.common.typeinfo.TypeHint;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.streaming.api.functions.co.RichCoFlatMapFunction;
+import org.apache.flink.util.Collector;
+import theodolite.uc4.application.util.SensorParentKey;
+import titan.ccp.model.records.ActivePowerRecord;
+
+/**
+ * A {@link RichCoFlatMapFunction} which joins each incoming {@link ActivePowerRecord} with its
+ * corresponding parents. The {@link ActivePowerRecord} is duplicated for each parent. When
+ * receiving a new set of parents for a sensor, this operator updates its internal state and
+ * forwards "tombstone" record if a sensor does no longer have a certain parent.
+ */
+public class JoinAndDuplicateCoFlatMapFunction extends
+    RichCoFlatMapFunction<ActivePowerRecord, Tuple2<String, Set<String>>, Tuple2<SensorParentKey, ActivePowerRecord>> { // NOCS
+
+  private static final long serialVersionUID = -6992783644887835979L; // NOPMD
+
+  private transient MapState<String, Set<String>> state;
+
+  @Override
+  public void open(final Configuration parameters) throws Exception {
+    final MapStateDescriptor<String, Set<String>> descriptor =
+        new MapStateDescriptor<>(
+            "join-and-duplicate-state",
+            TypeInformation.of(new TypeHint<String>() {}),
+            TypeInformation.of(new TypeHint<Set<String>>() {}));
+    this.state = this.getRuntimeContext().getMapState(descriptor);
+  }
+
+  @Override
+  public void flatMap1(final ActivePowerRecord value,
+      final Collector<Tuple2<SensorParentKey, ActivePowerRecord>> out) throws Exception {
+    final Set<String> parents = this.state.get(value.getIdentifier());
+    if (parents == null) {
+      return;
+    }
+    for (final String parent : parents) {
+      out.collect(new Tuple2<>(new SensorParentKey(value.getIdentifier(), parent), value));
+    }
+  }
+
+  @Override
+  public void flatMap2(final Tuple2<String, Set<String>> value,
+      final Collector<Tuple2<SensorParentKey, ActivePowerRecord>> out) throws Exception {
+    final String sensor = value.f0;
+    final Set<String> oldParents = this.state.get(sensor);
+    final Set<String> newParents = value.f1;
+    if (oldParents != null && !newParents.equals(oldParents)) {
+      for (final String oldParent : oldParents) {
+        if (!newParents.contains(oldParent)) {
+          // Parent was deleted, emit tombstone record
+          out.collect(new Tuple2<>(new SensorParentKey(sensor, oldParent), null));
+        }
+      }
+    }
+    this.state.put(sensor, newParents);
+  }
+}
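Annotation: a worked example of the tombstone logic (again with hypothetical names). If sensor1 previously had parents {groupA, groupB} and a configuration update reduces them to {groupA}, flatMap2 emits the tombstone ((sensor1, groupB), null) and stores the new parent set, so subsequent readings of sensor1 are duplicated only for groupA.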
diff --git a/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/RecordAggregationProcessWindowFunction.java b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/RecordAggregationProcessWindowFunction.java
new file mode 100644
index 0000000000000000000000000000000000000000..45d4a09d153881572c949d2af7542f9cffb5622d
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/RecordAggregationProcessWindowFunction.java
@@ -0,0 +1,102 @@
+package theodolite.uc4.application;
+
+import org.apache.flink.api.common.state.MapState;
+import org.apache.flink.api.common.state.MapStateDescriptor;
+import org.apache.flink.api.common.state.ValueState;
+import org.apache.flink.api.common.state.ValueStateDescriptor;
+import org.apache.flink.api.common.typeinfo.TypeHint;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.java.tuple.Tuple2;
+import org.apache.flink.configuration.Configuration;
+import org.apache.flink.streaming.api.functions.windowing.ProcessWindowFunction;
+import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
+import org.apache.flink.util.Collector;
+import theodolite.uc4.application.util.SensorParentKey;
+import titan.ccp.model.records.ActivePowerRecord;
+import titan.ccp.model.records.AggregatedActivePowerRecord;
+
+/**
+ * A {@link ProcessWindowFunction} which performs the windowed aggregation of all
+ * {@link ActivePowerRecord}s for the same {@link SensorParentKey}. The result of this aggregation
+ * is an {@link AggregatedActivePowerRecord}.
+ */
+public class RecordAggregationProcessWindowFunction extends
+    ProcessWindowFunction<Tuple2<SensorParentKey, ActivePowerRecord>, AggregatedActivePowerRecord, String, TimeWindow> { // NOCS
+
+  private static final long serialVersionUID = 6030159552332624435L; // NOPMD
+
+  private transient MapState<SensorParentKey, ActivePowerRecord> lastValueState;
+  private transient ValueState<AggregatedActivePowerRecord> aggregateState;
+
+  @Override
+  public void open(final Configuration parameters) {
+    final MapStateDescriptor<SensorParentKey, ActivePowerRecord> lastValueStateDescriptor =
+        new MapStateDescriptor<>(
+            "last-value-state",
+            TypeInformation.of(new TypeHint<SensorParentKey>() {}),
+            TypeInformation.of(new TypeHint<ActivePowerRecord>() {}));
+    this.lastValueState = this.getRuntimeContext().getMapState(lastValueStateDescriptor);
+
+    final ValueStateDescriptor<AggregatedActivePowerRecord> aggregateStateDescriptor =
+        new ValueStateDescriptor<>(
+            "aggregation-state",
+            TypeInformation.of(new TypeHint<AggregatedActivePowerRecord>() {}));
+    this.aggregateState = this.getRuntimeContext().getState(aggregateStateDescriptor);
+  }
+
+  @Override
+  public void process(
+      final String key,
+      final Context context,
+      final Iterable<Tuple2<SensorParentKey, ActivePowerRecord>> elements,
+      final Collector<AggregatedActivePowerRecord> out) throws Exception {
+    for (final Tuple2<SensorParentKey, ActivePowerRecord> t : elements) {
+      AggregatedActivePowerRecord currentAggregate = this.aggregateState.value();
+      if (currentAggregate == null) {
+        currentAggregate = new AggregatedActivePowerRecord(key, 0L, 0L, 0.0, 0.0);
+        this.aggregateState.update(currentAggregate);
+      }
+      long count = currentAggregate.getCount();
+
+      final SensorParentKey sensorParentKey = t.f0;
+      ActivePowerRecord newRecord = t.f1;
+      if (newRecord == null) { // sensor was deleted -> decrease count, set newRecord to zero
+        count--;
+        newRecord = new ActivePowerRecord(sensorParentKey.getSensor(), 0L, 0.0);
+      }
+
+      // get last value of this record from state or create 0 valued record
+      ActivePowerRecord previousRecord = this.lastValueState.get(sensorParentKey);
+      if (previousRecord == null) { // sensor was added -> increase count
+        count++;
+        previousRecord = new ActivePowerRecord(sensorParentKey.getSensor(), 0L, 0.0);
+      }
+
+      // if incoming record is older than the last saved record, skip the record
+      if (newRecord.getTimestamp() < previousRecord.getTimestamp()) {
+        continue;
+      }
+
+      // prefer newer timestamp, but use previous if 0 -> sensor was deleted
+      final long timestamp =
+          newRecord.getTimestamp() == 0 ? previousRecord.getTimestamp() : newRecord.getTimestamp();
+      final double sumInW =
+          currentAggregate.getSumInW() - previousRecord.getValueInW() + newRecord.getValueInW();
+      final double avgInW = count == 0 ? 0 : sumInW / count;
+
+      final AggregatedActivePowerRecord newAggregate = new AggregatedActivePowerRecord(
+          sensorParentKey.getParent(),
+          timestamp,
+          count,
+          sumInW,
+          avgInW);
+
+      // update state and aggregateState
+      this.lastValueState.put(sensorParentKey, newRecord);
+      this.aggregateState.update(newAggregate);
+    }
+
+    // emit aggregated record
+    out.collect(this.aggregateState.value());
+  }
+}
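Annotation: a worked example of the incremental update above. If the running aggregate for a group is count = 2, sumInW = 100.0 and a sensor whose last stored value was 30 W reports 50 W, the new aggregate becomes sumInW = 100 - 30 + 50 = 120, count = 2, avgInW = 60. A tombstone (null record) for that sensor instead yields count = 1, sumInW = 100 - 30 + 0 = 70, avgInW = 70.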
diff --git a/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/util/ImmutableSensorRegistrySerializer.java b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/util/ImmutableSensorRegistrySerializer.java
new file mode 100644
index 0000000000000000000000000000000000000000..e157f35c8a052d2d4a28526a0d98d56515d586d6
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/util/ImmutableSensorRegistrySerializer.java
@@ -0,0 +1,28 @@
+package theodolite.uc4.application.util;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+import java.io.Serializable;
+import titan.ccp.model.sensorregistry.ImmutableSensorRegistry;
+
+/**
+ * A {@link Serializer} for {@link ImmutableSensorRegistry}s.
+ */
+public class ImmutableSensorRegistrySerializer extends Serializer<ImmutableSensorRegistry>
+    implements Serializable {
+
+  private static final long serialVersionUID = 1806411056006113017L; // NOPMD
+
+  @Override
+  public void write(final Kryo kryo, final Output output, final ImmutableSensorRegistry object) {
+    output.writeString(object.toJson());
+  }
+
+  @Override
+  public ImmutableSensorRegistry read(final Kryo kryo, final Input input,
+      final Class<ImmutableSensorRegistry> type) {
+    return (ImmutableSensorRegistry) ImmutableSensorRegistry.fromJson(input.readString());
+  }
+}
diff --git a/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/util/ImmutableSetSerializer.java b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/util/ImmutableSetSerializer.java
new file mode 100644
index 0000000000000000000000000000000000000000..6b2dbcdfb403705b39815dd31112deab7947d83d
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/util/ImmutableSetSerializer.java
@@ -0,0 +1,51 @@
+package theodolite.uc4.application.util;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+import java.io.Serializable;
+import java.util.Set;
+
+/**
+ * A {@link Serializer} for serializing arbitrary {@link Set}s of {@link Object}s.
+ */
+public final class ImmutableSetSerializer extends Serializer<Set<Object>> implements Serializable {
+
+  private static final long serialVersionUID = 6919877826110724620L; // NOPMD
+
+  public ImmutableSetSerializer() {
+    super(false, true);
+  }
+
+  @Override
+  public void write(final Kryo kryo, final Output output, final Set<Object> object) {
+    output.writeInt(object.size(), true);
+    for (final Object elm : object) {
+      kryo.writeClassAndObject(output, elm);
+    }
+  }
+
+  @Override
+  public Set<Object> read(final Kryo kryo, final Input input, final Class<Set<Object>> type) {
+    final int size = input.readInt(true);
+    final Object[] list = new Object[size];
+    for (int i = 0; i < size; ++i) {
+      list[i] = kryo.readClassAndObject(input);
+    }
+    return Set.of(list);
+  }
+
+  /**
+   * Creates a new {@link ImmutableSetSerializer} and registers it for the immutable
+   * {@link Set} implementation classes returned by {@code Set.of(...)}.
+   *
+   * @param kryo the {@link Kryo} instance to set the serializer on
+   */
+  public static void registerSerializers(final Kryo kryo) {
+    final ImmutableSetSerializer serializer = new ImmutableSetSerializer();
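+    // Set.of() returns different immutable implementation classes depending on the
+    // number of elements, so the serializer is registered for each concrete class.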
+    kryo.register(Set.of().getClass(), serializer);
+    kryo.register(Set.of(1).getClass(), serializer);
+    kryo.register(Set.of(1, 2, 3, 4).getClass(), serializer); // NOCS
+  }
+}
diff --git a/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/util/SensorParentKey.java b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/util/SensorParentKey.java
new file mode 100644
index 0000000000000000000000000000000000000000..903b66dd12a2864d522fde7eb7cf3fdc2ec73bcd
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/util/SensorParentKey.java
@@ -0,0 +1,51 @@
+package theodolite.uc4.application.util;
+
+import java.util.Objects;
+
+/**
+ * A key consisting of the identifier of a sensor and the identifier of its parent sensor.
+ */
+public class SensorParentKey {
+
+  private final String sensorIdentifier;
+
+  private final String parentIdentifier;
+
+  public SensorParentKey(final String sensorIdentifier, final String parentIdentifier) {
+    this.sensorIdentifier = sensorIdentifier;
+    this.parentIdentifier = parentIdentifier;
+  }
+
+  public String getSensor() {
+    return this.sensorIdentifier;
+  }
+
+  public String getParent() {
+    return this.parentIdentifier;
+  }
+
+  @Override
+  public String toString() {
+    return "{" + this.sensorIdentifier + ", " + this.parentIdentifier + "}";
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(this.sensorIdentifier, this.parentIdentifier);
+  }
+
+  @Override
+  public boolean equals(final Object obj) {
+    if (obj == this) {
+      return true;
+    }
+    if (!(obj instanceof SensorParentKey)) {
+      return false;
+    }
+    final SensorParentKey k = (SensorParentKey) obj;
+    return this.sensorIdentifier.equals(k.sensorIdentifier)
+        && this.parentIdentifier.equals(k.parentIdentifier);
+  }
+}
diff --git a/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/util/SensorParentKeySerializer.java b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/util/SensorParentKeySerializer.java
new file mode 100644
index 0000000000000000000000000000000000000000..bdd403a05de8f54f636568e839f5f48effd43d58
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/util/SensorParentKeySerializer.java
@@ -0,0 +1,30 @@
+package theodolite.uc4.application.util;
+
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.Serializer;
+import com.esotericsoftware.kryo.io.Input;
+import com.esotericsoftware.kryo.io.Output;
+import java.io.Serializable;
+
+/**
+ * Kryo serializer for {@link SensorParentKey}.
+ */
+public final class SensorParentKeySerializer extends Serializer<SensorParentKey>
+    implements Serializable {
+
+  private static final long serialVersionUID = -867781963471414857L; // NOPMD
+
+  @Override
+  public void write(final Kryo kryo, final Output output, final SensorParentKey object) {
+    output.writeString(object.getSensor());
+    output.writeString(object.getParent());
+  }
+
+  @Override
+  public SensorParentKey read(final Kryo kryo, final Input input,
+      final Class<SensorParentKey> type) {
+    final String sensor = input.readString();
+    final String parent = input.readString();
+    return new SensorParentKey(sensor, parent);
+  }
+}
diff --git a/theodolite-benchmarks/uc4-flink/src/main/resources/META-INF/application.properties b/theodolite-benchmarks/uc4-flink/src/main/resources/META-INF/application.properties
new file mode 100644
index 0000000000000000000000000000000000000000..de85fdb88c0462edc9fba58409918470fcb8cb6c
--- /dev/null
+++ b/theodolite-benchmarks/uc4-flink/src/main/resources/META-INF/application.properties
@@ -0,0 +1,16 @@
+application.name=theodolite-uc4-application
+application.version=0.0.1
+
+configuration.host=localhost
+configuration.port=8082
+configuration.kafka.topic=configuration
+
+kafka.bootstrap.servers=localhost:9092
+kafka.input.topic=input
+kafka.output.topic=output
+schema.registry.url=http://localhost:8081
+window.size.ms=1000
+window.grace.ms=0
+num.threads=1
+commit.interval.ms=1000
+cache.max.bytes.buffering=-1
diff --git a/benchmarks/uc4-application/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/uc4-kstreams/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from benchmarks/uc4-application/.settings/org.eclipse.jdt.ui.prefs
rename to theodolite-benchmarks/uc4-kstreams/.settings/org.eclipse.jdt.ui.prefs
diff --git a/theodolite-benchmarks/uc4-kstreams/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/uc4-kstreams/.settings/qa.eclipse.plugin.checkstyle.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..87860c815222845c1d264d7d0ce498d3397f8280
--- /dev/null
+++ b/theodolite-benchmarks/uc4-kstreams/.settings/qa.eclipse.plugin.checkstyle.prefs
@@ -0,0 +1,4 @@
+configFilePath=../config/checkstyle.xml
+customModulesJarPaths=
+eclipse.preferences.version=1
+enabled=true
diff --git a/theodolite-benchmarks/uc4-kstreams/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/uc4-kstreams/.settings/qa.eclipse.plugin.pmd.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..efbcb8c9e5d449194a48ca1ea42b7d807b573db9
--- /dev/null
+++ b/theodolite-benchmarks/uc4-kstreams/.settings/qa.eclipse.plugin.pmd.prefs
@@ -0,0 +1,4 @@
+customRulesJars=
+eclipse.preferences.version=1
+enabled=true
+ruleSetFilePath=../config/pmd.xml
diff --git a/benchmarks/uc4-application/Dockerfile b/theodolite-benchmarks/uc4-kstreams/Dockerfile
similarity index 52%
rename from benchmarks/uc4-application/Dockerfile
rename to theodolite-benchmarks/uc4-kstreams/Dockerfile
index add251c0ef11324830bcada9174fbbdecc18d532..f50f09995a0479de0d7deb8c68184c2dd3ed1461 100644
--- a/benchmarks/uc4-application/Dockerfile
+++ b/theodolite-benchmarks/uc4-kstreams/Dockerfile
@@ -1,6 +1,6 @@
 FROM openjdk:11-slim
 
-ADD build/distributions/uc4-application.tar /
+ADD build/distributions/uc4-kstreams.tar /
 
 CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
-     /uc4-application/bin/uc4-application
+     /uc4-kstreams/bin/uc4-kstreams
diff --git a/benchmarks/uc4-application/README.md b/theodolite-benchmarks/uc4-kstreams/README.md
similarity index 100%
rename from benchmarks/uc4-application/README.md
rename to theodolite-benchmarks/uc4-kstreams/README.md
diff --git a/benchmarks/uc4-application/build.gradle b/theodolite-benchmarks/uc4-kstreams/build.gradle
similarity index 58%
rename from benchmarks/uc4-application/build.gradle
rename to theodolite-benchmarks/uc4-kstreams/build.gradle
index 9cb1b311d8f50769d371952db886e4a00a454591..83212a499ae344ea44beb3c2b98aec147dda8488 100644
--- a/benchmarks/uc4-application/build.gradle
+++ b/theodolite-benchmarks/uc4-kstreams/build.gradle
@@ -1 +1,5 @@
+plugins {
+  id 'theodolite.kstreams'
+}
+
 mainClassName = "theodolite.uc4.application.AggregationService"
diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/application/AggregationService.java b/theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/application/AggregationService.java
similarity index 100%
rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/application/AggregationService.java
rename to theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/application/AggregationService.java
diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ChildParentsTransformer.java b/theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/ChildParentsTransformer.java
similarity index 100%
rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ChildParentsTransformer.java
rename to theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/ChildParentsTransformer.java
diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ChildParentsTransformerSupplier.java b/theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/ChildParentsTransformerSupplier.java
similarity index 100%
rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ChildParentsTransformerSupplier.java
rename to theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/ChildParentsTransformerSupplier.java
diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointFlatTransformer.java b/theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/JointFlatTransformer.java
similarity index 100%
rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointFlatTransformer.java
rename to theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/JointFlatTransformer.java
diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointFlatTransformerSupplier.java b/theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/JointFlatTransformerSupplier.java
similarity index 100%
rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointFlatTransformerSupplier.java
rename to theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/JointFlatTransformerSupplier.java
diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointRecordParents.java b/theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/JointRecordParents.java
similarity index 100%
rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointRecordParents.java
rename to theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/JointRecordParents.java
diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/OptionalParentsSerde.java b/theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/OptionalParentsSerde.java
similarity index 100%
rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/OptionalParentsSerde.java
rename to theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/OptionalParentsSerde.java
diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ParentsSerde.java b/theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/ParentsSerde.java
similarity index 100%
rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ParentsSerde.java
rename to theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/ParentsSerde.java
diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordAggregator.java b/theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/RecordAggregator.java
similarity index 100%
rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordAggregator.java
rename to theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/RecordAggregator.java
diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/SensorParentKey.java b/theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/SensorParentKey.java
similarity index 100%
rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/SensorParentKey.java
rename to theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/SensorParentKey.java
diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/SensorParentKeySerde.java b/theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/SensorParentKeySerde.java
similarity index 100%
rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/SensorParentKeySerde.java
rename to theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/SensorParentKeySerde.java
diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java b/theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
similarity index 100%
rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
rename to theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java b/theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
similarity index 100%
rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
rename to theodolite-benchmarks/uc4-kstreams/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
diff --git a/benchmarks/uc4-application/src/main/resources/META-INF/application.properties b/theodolite-benchmarks/uc4-kstreams/src/main/resources/META-INF/application.properties
similarity index 86%
rename from benchmarks/uc4-application/src/main/resources/META-INF/application.properties
rename to theodolite-benchmarks/uc4-kstreams/src/main/resources/META-INF/application.properties
index ce06091076e6ff7f9ede355c7f54c12b3d872119..a21f7e917e3ce4a0762261ca90444613c82ab650 100644
--- a/benchmarks/uc4-application/src/main/resources/META-INF/application.properties
+++ b/theodolite-benchmarks/uc4-kstreams/src/main/resources/META-INF/application.properties
@@ -7,7 +7,7 @@ kafka.configuration.topic=configuration
 kafka.feedback.topic=aggregation-feedback
 kafka.output.topic=output
 
-schema.registry.url=http://localhost:8091
+schema.registry.url=http://localhost:8081
 
 emit.period.ms=5000
 grace.period.ms=0
\ No newline at end of file
diff --git a/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/OptionalParentsSerdeTest.java b/theodolite-benchmarks/uc4-kstreams/src/test/java/theodolite/uc4/streamprocessing/OptionalParentsSerdeTest.java
similarity index 100%
rename from benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/OptionalParentsSerdeTest.java
rename to theodolite-benchmarks/uc4-kstreams/src/test/java/theodolite/uc4/streamprocessing/OptionalParentsSerdeTest.java
diff --git a/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/ParentsSerdeTest.java b/theodolite-benchmarks/uc4-kstreams/src/test/java/theodolite/uc4/streamprocessing/ParentsSerdeTest.java
similarity index 100%
rename from benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/ParentsSerdeTest.java
rename to theodolite-benchmarks/uc4-kstreams/src/test/java/theodolite/uc4/streamprocessing/ParentsSerdeTest.java
diff --git a/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SensorParentKeySerdeTest.java b/theodolite-benchmarks/uc4-kstreams/src/test/java/theodolite/uc4/streamprocessing/SensorParentKeySerdeTest.java
similarity index 100%
rename from benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SensorParentKeySerdeTest.java
rename to theodolite-benchmarks/uc4-kstreams/src/test/java/theodolite/uc4/streamprocessing/SensorParentKeySerdeTest.java
diff --git a/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SerdeTester.java b/theodolite-benchmarks/uc4-kstreams/src/test/java/theodolite/uc4/streamprocessing/SerdeTester.java
similarity index 100%
rename from benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SerdeTester.java
rename to theodolite-benchmarks/uc4-kstreams/src/test/java/theodolite/uc4/streamprocessing/SerdeTester.java
diff --git a/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SerdeTesterFactory.java b/theodolite-benchmarks/uc4-kstreams/src/test/java/theodolite/uc4/streamprocessing/SerdeTesterFactory.java
similarity index 100%
rename from benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SerdeTesterFactory.java
rename to theodolite-benchmarks/uc4-kstreams/src/test/java/theodolite/uc4/streamprocessing/SerdeTesterFactory.java
diff --git a/theodolite-benchmarks/uc4-load-generator/.settings/org.eclipse.jdt.ui.prefs b/theodolite-benchmarks/uc4-load-generator/.settings/org.eclipse.jdt.ui.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..4d01df75552c562406705858b6368ecf59d6e82f
--- /dev/null
+++ b/theodolite-benchmarks/uc4-load-generator/.settings/org.eclipse.jdt.ui.prefs
@@ -0,0 +1,128 @@
+cleanup.add_default_serial_version_id=true
+cleanup.add_generated_serial_version_id=false
+cleanup.add_missing_annotations=true
+cleanup.add_missing_deprecated_annotations=true
+cleanup.add_missing_methods=false
+cleanup.add_missing_nls_tags=false
+cleanup.add_missing_override_annotations=true
+cleanup.add_missing_override_annotations_interface_methods=true
+cleanup.add_serial_version_id=false
+cleanup.always_use_blocks=true
+cleanup.always_use_parentheses_in_expressions=false
+cleanup.always_use_this_for_non_static_field_access=true
+cleanup.always_use_this_for_non_static_method_access=true
+cleanup.convert_functional_interfaces=false
+cleanup.convert_to_enhanced_for_loop=true
+cleanup.correct_indentation=true
+cleanup.format_source_code=true
+cleanup.format_source_code_changes_only=false
+cleanup.insert_inferred_type_arguments=false
+cleanup.make_local_variable_final=true
+cleanup.make_parameters_final=true
+cleanup.make_private_fields_final=true
+cleanup.make_type_abstract_if_missing_method=false
+cleanup.make_variable_declarations_final=true
+cleanup.never_use_blocks=false
+cleanup.never_use_parentheses_in_expressions=true
+cleanup.organize_imports=true
+cleanup.qualify_static_field_accesses_with_declaring_class=false
+cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+cleanup.qualify_static_member_accesses_with_declaring_class=true
+cleanup.qualify_static_method_accesses_with_declaring_class=false
+cleanup.remove_private_constructors=true
+cleanup.remove_redundant_modifiers=false
+cleanup.remove_redundant_semicolons=true
+cleanup.remove_redundant_type_arguments=true
+cleanup.remove_trailing_whitespaces=true
+cleanup.remove_trailing_whitespaces_all=true
+cleanup.remove_trailing_whitespaces_ignore_empty=false
+cleanup.remove_unnecessary_casts=true
+cleanup.remove_unnecessary_nls_tags=true
+cleanup.remove_unused_imports=true
+cleanup.remove_unused_local_variables=false
+cleanup.remove_unused_private_fields=true
+cleanup.remove_unused_private_members=false
+cleanup.remove_unused_private_methods=true
+cleanup.remove_unused_private_types=true
+cleanup.sort_members=false
+cleanup.sort_members_all=false
+cleanup.use_anonymous_class_creation=false
+cleanup.use_blocks=true
+cleanup.use_blocks_only_for_return_and_throw=false
+cleanup.use_lambda=true
+cleanup.use_parentheses_in_expressions=true
+cleanup.use_this_for_non_static_field_access=true
+cleanup.use_this_for_non_static_field_access_only_if_necessary=false
+cleanup.use_this_for_non_static_method_access=true
+cleanup.use_this_for_non_static_method_access_only_if_necessary=false
+cleanup_profile=_CAU-SE-Style
+cleanup_settings_version=2
+eclipse.preferences.version=1
+editor_save_participant_org.eclipse.jdt.ui.postsavelistener.cleanup=true
+formatter_profile=_CAU-SE-Style
+formatter_settings_version=15
+org.eclipse.jdt.ui.ignorelowercasenames=true
+org.eclipse.jdt.ui.importorder=;
+org.eclipse.jdt.ui.ondemandthreshold=99
+org.eclipse.jdt.ui.staticondemandthreshold=99
+org.eclipse.jdt.ui.text.custom_code_templates=
+sp_cleanup.add_default_serial_version_id=true
+sp_cleanup.add_generated_serial_version_id=false
+sp_cleanup.add_missing_annotations=true
+sp_cleanup.add_missing_deprecated_annotations=true
+sp_cleanup.add_missing_methods=false
+sp_cleanup.add_missing_nls_tags=false
+sp_cleanup.add_missing_override_annotations=true
+sp_cleanup.add_missing_override_annotations_interface_methods=true
+sp_cleanup.add_serial_version_id=false
+sp_cleanup.always_use_blocks=true
+sp_cleanup.always_use_parentheses_in_expressions=false
+sp_cleanup.always_use_this_for_non_static_field_access=true
+sp_cleanup.always_use_this_for_non_static_method_access=true
+sp_cleanup.convert_functional_interfaces=false
+sp_cleanup.convert_to_enhanced_for_loop=true
+sp_cleanup.correct_indentation=true
+sp_cleanup.format_source_code=true
+sp_cleanup.format_source_code_changes_only=false
+sp_cleanup.insert_inferred_type_arguments=false
+sp_cleanup.make_local_variable_final=true
+sp_cleanup.make_parameters_final=true
+sp_cleanup.make_private_fields_final=true
+sp_cleanup.make_type_abstract_if_missing_method=false
+sp_cleanup.make_variable_declarations_final=true
+sp_cleanup.never_use_blocks=false
+sp_cleanup.never_use_parentheses_in_expressions=true
+sp_cleanup.on_save_use_additional_actions=true
+sp_cleanup.organize_imports=true
+sp_cleanup.qualify_static_field_accesses_with_declaring_class=false
+sp_cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_with_declaring_class=true
+sp_cleanup.qualify_static_method_accesses_with_declaring_class=false
+sp_cleanup.remove_private_constructors=true
+sp_cleanup.remove_redundant_modifiers=false
+sp_cleanup.remove_redundant_semicolons=true
+sp_cleanup.remove_redundant_type_arguments=true
+sp_cleanup.remove_trailing_whitespaces=true
+sp_cleanup.remove_trailing_whitespaces_all=true
+sp_cleanup.remove_trailing_whitespaces_ignore_empty=false
+sp_cleanup.remove_unnecessary_casts=true
+sp_cleanup.remove_unnecessary_nls_tags=true
+sp_cleanup.remove_unused_imports=true
+sp_cleanup.remove_unused_local_variables=false
+sp_cleanup.remove_unused_private_fields=true
+sp_cleanup.remove_unused_private_members=false
+sp_cleanup.remove_unused_private_methods=true
+sp_cleanup.remove_unused_private_types=true
+sp_cleanup.sort_members=false
+sp_cleanup.sort_members_all=false
+sp_cleanup.use_anonymous_class_creation=false
+sp_cleanup.use_blocks=true
+sp_cleanup.use_blocks_only_for_return_and_throw=false
+sp_cleanup.use_lambda=true
+sp_cleanup.use_parentheses_in_expressions=true
+sp_cleanup.use_this_for_non_static_field_access=true
+sp_cleanup.use_this_for_non_static_field_access_only_if_necessary=false
+sp_cleanup.use_this_for_non_static_method_access=true
+sp_cleanup.use_this_for_non_static_method_access_only_if_necessary=false
diff --git a/theodolite-benchmarks/uc4-load-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/theodolite-benchmarks/uc4-load-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..87860c815222845c1d264d7d0ce498d3397f8280
--- /dev/null
+++ b/theodolite-benchmarks/uc4-load-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
@@ -0,0 +1,4 @@
+configFilePath=../config/checkstyle.xml
+customModulesJarPaths=
+eclipse.preferences.version=1
+enabled=true
diff --git a/theodolite-benchmarks/uc4-load-generator/.settings/qa.eclipse.plugin.pmd.prefs b/theodolite-benchmarks/uc4-load-generator/.settings/qa.eclipse.plugin.pmd.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..efbcb8c9e5d449194a48ca1ea42b7d807b573db9
--- /dev/null
+++ b/theodolite-benchmarks/uc4-load-generator/.settings/qa.eclipse.plugin.pmd.prefs
@@ -0,0 +1,4 @@
+customRulesJars=
+eclipse.preferences.version=1
+enabled=true
+ruleSetFilePath=../config/pmd.xml
diff --git a/theodolite-benchmarks/uc4-load-generator/Dockerfile b/theodolite-benchmarks/uc4-load-generator/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..7723121ed337492af5a2e5b3ca3e026b1a7a3478
--- /dev/null
+++ b/theodolite-benchmarks/uc4-load-generator/Dockerfile
@@ -0,0 +1,6 @@
+FROM openjdk:11-slim
+
+ADD build/distributions/uc4-load-generator.tar /
+
+CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
+     /uc4-load-generator/bin/uc4-load-generator
diff --git a/benchmarks/uc4-workload-generator/build.gradle b/theodolite-benchmarks/uc4-load-generator/build.gradle
similarity index 58%
rename from benchmarks/uc4-workload-generator/build.gradle
rename to theodolite-benchmarks/uc4-load-generator/build.gradle
index 8865ec9391213f3d8c52be2366573dee09652087..9785718056fa1a14d687a75237cd23b941ce7365 100644
--- a/benchmarks/uc4-workload-generator/build.gradle
+++ b/theodolite-benchmarks/uc4-load-generator/build.gradle
@@ -1 +1,5 @@
+plugins {
+  id 'theodolite.load-generator'
+}
+
 mainClassName = "theodolite.uc4.workloadgenerator.LoadGenerator"
diff --git a/benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/ConfigPublisher.java b/theodolite-benchmarks/uc4-load-generator/src/main/java/theodolite/uc4/workloadgenerator/ConfigPublisher.java
similarity index 100%
rename from benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/ConfigPublisher.java
rename to theodolite-benchmarks/uc4-load-generator/src/main/java/theodolite/uc4/workloadgenerator/ConfigPublisher.java
diff --git a/benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java b/theodolite-benchmarks/uc4-load-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
rename to theodolite-benchmarks/uc4-load-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
diff --git a/benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/SensorRegistryBuilder.java b/theodolite-benchmarks/uc4-load-generator/src/main/java/theodolite/uc4/workloadgenerator/SensorRegistryBuilder.java
similarity index 100%
rename from benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/SensorRegistryBuilder.java
rename to theodolite-benchmarks/uc4-load-generator/src/main/java/theodolite/uc4/workloadgenerator/SensorRegistryBuilder.java
diff --git a/benchmarks/uc4-workload-generator/src/main/resources/META-INF/application.properties b/theodolite-benchmarks/uc4-load-generator/src/main/resources/META-INF/application.properties
similarity index 100%
rename from benchmarks/uc4-workload-generator/src/main/resources/META-INF/application.properties
rename to theodolite-benchmarks/uc4-load-generator/src/main/resources/META-INF/application.properties
diff --git a/benchmarks/uc4-workload-generator/src/test/java/theodolite/uc4/workloadgenerator/SensorRegistryBuilderTest.java b/theodolite-benchmarks/uc4-load-generator/src/test/java/theodolite/uc4/workloadgenerator/SensorRegistryBuilderTest.java
similarity index 100%
rename from benchmarks/uc4-workload-generator/src/test/java/theodolite/uc4/workloadgenerator/SensorRegistryBuilderTest.java
rename to theodolite-benchmarks/uc4-load-generator/src/test/java/theodolite/uc4/workloadgenerator/SensorRegistryBuilderTest.java
diff --git a/theodolite/.dockerignore b/theodolite/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..680e535674de90720f521c92a5ad518100f906b8
--- /dev/null
+++ b/theodolite/.dockerignore
@@ -0,0 +1,5 @@
+*
+!build/*-runner
+!build/*-runner.jar
+!build/lib/*
+!build/quarkus-app/*
diff --git a/theodolite/.gitignore b/theodolite/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..a1eff0e1d4dddacdbcafa2c235b28616cb53e7bf
--- /dev/null
+++ b/theodolite/.gitignore
@@ -0,0 +1,33 @@
+# Gradle
+.gradle/
+build/
+
+# Eclipse
+.project
+.classpath
+.settings/
+bin/
+
+# IntelliJ
+.idea
+*.ipr
+*.iml
+*.iws
+
+# NetBeans
+nb-configuration.xml
+
+# Visual Studio Code
+.vscode
+.factorypath
+
+# OSX
+.DS_Store
+
+# Vim
+*.swp
+*.swo
+
+# patch
+*.orig
+*.rej
diff --git a/theodolite/README.md b/theodolite/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..60bd56d933d8955217120465c47e70b4b34585e1
--- /dev/null
+++ b/theodolite/README.md
@@ -0,0 +1,159 @@
+# Theodolite project
+
+This project uses Quarkus, the Supersonic Subatomic Java Framework.
+
+If you want to learn more about Quarkus, please visit its website: <https://quarkus.io/>.
+
+## Running the application in dev mode
+
+You can run your application in dev mode using:
+
+```sh
+./gradlew quarkusDev
+```
+
+### Hint for running with k3s (or k3d)
+
+You may need to add the following dependencies to the `build.gradle` file when running Theodolite with k3s.
+
+```groovy
+implementation 'org.bouncycastle:bcprov-ext-jdk15on:1.68'
+implementation 'org.bouncycastle:bcpkix-jdk15on:1.68'
+```
+
+## Packaging and running the application
+
+The application can be packaged using:
+
+```sh
+./gradlew build
+```
+
+It produces the `theodolite-1.0.0-SNAPSHOT-runner.jar` file in the `/build` directory. Be aware that it’s not
+an _über-jar_ as the dependencies are copied into the `build/lib` directory.
+
+If you want to build an _über-jar_, execute the following command:
+
+```sh
+./gradlew build -Dquarkus.package.type=uber-jar
+```
+
+The application is now runnable using `java -jar build/theodolite-1.0.0-SNAPSHOT-runner.jar`.
+
+## Creating a native executable
+
+It is recommended to use GraalVM native images to create executables from Theodolite. For more information please visit the [Native Image guide](https://www.graalvm.org/reference-manual/native-image/).
+
+You can create a native executable using:
+
+```sh
+./gradlew build -Dquarkus.package.type=native
+```
+
+Or, if you don't have GraalVM installed, you can run the native executable build in a container using:
+
+```sh
+./gradlew build -Dquarkus.package.type=native -Dquarkus.native.container-build=true
+```
+
+You can then execute your native executable with:
+
+```sh
+./build/theodolite-1.0.0-SNAPSHOT-runner
+```
+
+If you want to learn more about building native executables, please consult <https://quarkus.io/guides/gradle-tooling>.
+
+## Build docker images
+
+For the JVM version use:
+
+```sh
+./gradlew build
+docker build -f src/main/docker/Dockerfile.jvm -t theodolite-jvm .
+```
+
+For the native image version use:
+
+```sh
+./gradlew build -Dquarkus.package.type=native
+docker build -f src/main/docker/Dockerfile.native -t theodolite-native .
+```
+
+## Execute docker images
+
+Remember to set the environment variables first.
+
+JVM version:
+
+```sh
+docker run -i --rm theodolite-jvm
+```
+
+Native image version:
+
+```sh
+docker run -i --rm theodolite-native
+```
+
+## Environment variables
+
+**Execution in Docker**:
+
+| Variable name                | Default value                      | Usage        |
+| -----------------------------|:----------------------------------:| ------------:|
+| `NAMESPACE`                  | `default`                          | Determines the namespace in which Theodolite will be executed. Used in the KubernetesBenchmark. |
+| `THEODOLITE_EXECUTION`       | `execution/execution.yaml`         | The complete path to the benchmarkExecution file. Used in the TheodoliteYamlExecutor. |
+| `THEODOLITE_BENCHMARK_TYPE`  | `benchmark/benchmark.yaml`         | The complete path to the benchmarkType file. Used in the TheodoliteYamlExecutor. |
+| `THEODOLITE_APP_RESOURCES`   | `./benchmark-resources`            | The path under which the YAMLs for the resources for the subexperiments are found. Used in the KubernetesBenchmark. |
+| `MODE`                       | `standalone`                       | Defines the mode of operation: either `standalone` or `operator`. |
+
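+A minimal sketch of overriding these variables for the JVM image built above (`theodolite-jvm`); the namespace value is illustrative:
+
+```sh
+docker run -i --rm \
+  -e MODE=operator \
+  -e NAMESPACE=theodolite \
+  theodolite-jvm
+```
+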
+**Execution in IntelliJ**:
+
+When running Theodolite from within IntelliJ via
+[Run Configurations](https://www.jetbrains.com/help/idea/work-with-gradle-tasks.html#gradle_run_config), set the *Environment variables* field.
+
+For the `standalone` mode, set it to:
+
+```sh
+THEODOLITE_BENCHMARK=./../../../../examples/standalone/example-benchmark.yaml;THEODOLITE_EXECUTION=./../../../../examples/standalone/example-execution.yaml;THEODOLITE_APP_RESOURCES=./../../../../examples/resources;
+```
+
+Alternatively, from a terminal:
+
+```sh
+export THEODOLITE_BENCHMARK=./../../../../examples/standalone/example-benchmark.yaml
+export THEODOLITE_APP_RESOURCES=./../../../../examples/resources
+export THEODOLITE_EXECUTION=./../../../../examples/standalone/example-execution.yaml
+./gradlew quarkusDev
+```
+
+For the `operator` mode, set it to:
+
+```sh
+THEODOLITE_APP_RESOURCES=./../../../../examples/resources;MODE=operator
+```
+
+Alternatively, from a terminal:
+
+```sh
+export THEODOLITE_APP_RESOURCES=./../../../../examples/resources
+export MODE=operator
+./gradlew quarkusDev
+```
+
+Additionally, the benchmark and execution resources must be installed.
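+
+For example, assuming the working directory is this `theodolite/` folder, the CRDs and the operator example resources from this repository can be applied with:
+
+```sh
+kubectl apply -f crd/crd-benchmark.yaml
+kubectl apply -f crd/crd-execution.yaml
+kubectl apply -f examples/operator/example-configmap.yaml
+kubectl apply -f examples/operator/example-benchmark.yaml
+```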
+
+### Install the Detekt code analysis plugin
+
+Install <https://plugins.jetbrains.com/plugin/10761-detekt>:
+
+- Install the plugin
+- Navigate to Settings/Preferences -> Tools -> Detekt
+- Check *Enable Detekt*
+- Specify your Detekt configuration and baseline file (optional)
+
+Detekt issues will then be annotated on the fly while coding.
+
+**Ignore failures in the build**: add
+
+```ignoreFailures = true```
+
+to the `detekt` task in `build.gradle`.
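+
+This matches the `detekt` block already configured in this project's `build.gradle`:
+
+```groovy
+detekt {
+    failFast = true // fail build on any finding
+    buildUponDefaultConfig = true
+    ignoreFailures = true
+}
+```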
diff --git a/theodolite/build.gradle b/theodolite/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..3082deaf12fc48c6aca97ffd00b9c74cd7e6c143
--- /dev/null
+++ b/theodolite/build.gradle
@@ -0,0 +1,68 @@
+plugins {
+    id 'org.jetbrains.kotlin.jvm' version "1.3.72"
+    id "org.jetbrains.kotlin.plugin.allopen" version "1.3.72"
+    id 'io.quarkus'
+    id "io.gitlab.arturbosch.detekt" version "1.15.0"   //For code style
+    id "org.jlleitschuh.gradle.ktlint" version "10.0.0" // same as above
+}
+
+repositories {
+    mavenLocal()
+    mavenCentral()
+    jcenter()
+}
+
+dependencies {
+    implementation enforcedPlatform("${quarkusPlatformGroupId}:${quarkusPlatformArtifactId}:${quarkusPlatformVersion}")
+    implementation 'io.quarkus:quarkus-kotlin'
+    implementation 'org.jetbrains.kotlin:kotlin-stdlib-jdk8'
+    implementation 'io.quarkus:quarkus-arc'
+    implementation 'io.quarkus:quarkus-resteasy'
+    implementation 'com.google.code.gson:gson:2.8.5'
+    implementation 'org.slf4j:slf4j-simple:1.7.29'
+    implementation 'io.github.microutils:kotlin-logging:1.12.0'
+    implementation('io.fabric8:kubernetes-client:5.4.1'){force = true}
+    implementation('io.fabric8:kubernetes-model-core:5.4.1'){force = true}
+    implementation('io.fabric8:kubernetes-model-common:5.4.1'){force = true}
+    implementation 'org.apache.kafka:kafka-clients:2.7.0'
+    implementation 'khttp:khttp:1.0.0'
+
+    compile 'junit:junit:4.12'
+
+    testImplementation 'io.quarkus:quarkus-junit5'
+    testImplementation 'io.rest-assured:rest-assured'
+    testImplementation 'org.junit-pioneer:junit-pioneer:1.4.0'
+    testImplementation ('io.fabric8:kubernetes-server-mock:5.4.1'){force = true}
+}
+
+group 'theodolite'
+version '0.5.0-SNAPSHOT'
+
+java {
+    sourceCompatibility = JavaVersion.VERSION_11
+    targetCompatibility = JavaVersion.VERSION_11
+}
+
+allOpen {
+    annotation("javax.ws.rs.Path")
+    annotation("javax.enterprise.context.ApplicationScoped")
+    annotation("io.quarkus.test.junit.QuarkusTest")
+}
+
+compileKotlin {
+    kotlinOptions.jvmTarget = JavaVersion.VERSION_11
+    kotlinOptions.javaParameters = true
+}
+
+compileTestKotlin {
+    kotlinOptions.jvmTarget = JavaVersion.VERSION_11
+}
+detekt {
+    failFast = true // fail build on any finding
+    buildUponDefaultConfig = true
+    ignoreFailures = true
+}
+
+ktlint {
+    ignoreFailures = true
+}
\ No newline at end of file
diff --git a/theodolite/build_jvm.sh b/theodolite/build_jvm.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f4dd32fc5228576f09e95f0e8ac06fa08ea6acc7
--- /dev/null
+++ b/theodolite/build_jvm.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+./gradlew build -x test
+
+docker build -f src/main/docker/Dockerfile.jvm -t quarkus/theodolite-jvm .
+
+docker run -i --rm -p 8080:8080 quarkus/theodolite-jvm
diff --git a/theodolite/build_native.sh b/theodolite/build_native.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c2d7d81f35a24af951005bb30c52a8ab494ddb64
--- /dev/null
+++ b/theodolite/build_native.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+./gradlew build -Dquarkus.package.type=native -x test
+
+docker build -f src/main/docker/Dockerfile.native -t quarkus/theodolite .
+
+docker run -i --rm -p 8080:8080 quarkus/theodolite
diff --git a/theodolite/crd/crd-benchmark.yaml b/theodolite/crd/crd-benchmark.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7ab2e5f3b890a883f68dbbd36805f3791158f256
--- /dev/null
+++ b/theodolite/crd/crd-benchmark.yaml
@@ -0,0 +1,255 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: benchmarks.theodolite.com
+spec:
+  group: theodolite.com
+  names:
+    kind: benchmark
+    plural: benchmarks
+    shortNames:
+      - bench
+  versions:
+  - name: v1
+    served: true
+    storage: true
+    schema:
+      openAPIV3Schema:
+        type: object
+        required: ["spec"]
+        properties:
+          spec:
+            type: object
+            required: ["sut", "loadGenerator", "resourceTypes", "loadTypes", "kafkaConfig"]
+            properties:
+              name:
+                description: This field exists only for technical reasons and should not be set by the user. The value of the field will be overwritten.
+                type: string
+                default: ""
+              infrastructure:
+                description: (Optional) A list of resource sets that reference Kubernetes resources which are deployed on the cluster to create the required infrastructure.
+                type: object
+                default: {}
+                properties:
+                  resources:
+                    type: array
+                    default: []
+                    items:
+                      type: object
+                      oneOf:
+                        - required: [ configMap ]
+                        - required: [ fileSystem ]
+                      properties:
+                        configMap:
+                          description: The configMap resourceSet loads the Kubernetes manifests from a Kubernetes configMap.
+                          type: object
+                          properties:
+                            name:
+                              description: The name of the configMap
+                              type: string
+                            files:
+                              description: (Optional) Specifies which files from the configMap should be loaded. If this field is not set, all files are loaded.
+                              type: array
+                              items:
+                                type: string
+                        fileSystem:
+                          description: The fileSystem resourceSet loads the Kubernetes manifests from the filesystem.
+                          type: object
+                          properties:
+                            path:
+                              description: The path to the folder that contains the Kubernetes manifest files.
+                              type: string
+                            files:
+                              description: (Optional) Specifies which files from the file system should be loaded. If this field is not set, all files are loaded.
+                              type: array
+                              items:
+                                type: string
+              sut:
+                description: The appResourceSets specifies all Kubernetes resources required to start the SUT. A resourceSet can be either a configMap resourceSet or a fileSystem resourceSet.
+                type: object
+                properties:
+                  resources:
+                    type: array
+                    default: [ ]
+                    items:
+                      type: object
+                      oneOf:
+                        - required: [ configMap ]
+                        - required: [ fileSystem ]
+                      properties:
+                        configMap:
+                          description: The configMap resourceSet loads the Kubernetes manifests from a Kubernetes configMap.
+                          type: object
+                          properties:
+                            name:
+                              description: The name of the configMap
+                              type: string
+                            files:
+                              description: (Optional) Specifies which files from the configMap should be loaded. If this field is not set, all files are loaded.
+                              type: array
+                              items:
+                                type: string
+                        fileSystem:
+                          description: The fileSystem resourceSet loads the Kubernetes manifests from the filesystem.
+                          type: object
+                          properties:
+                            path:
+                              description: The path to the folder that contains the Kubernetes manifest files.
+                              type: string
+                            files:
+                              description: (Optional) Specifies which files from the file system should be loaded. If this field is not set, all files are loaded.
+                              type: array
+                              items:
+                                type: string
+              loadGenerator:
+                description: The loadGenResourceSets specifies all Kubernetes resources required to start the load generator. A resourceSet can be either a configMap resourceSet or a fileSystem resourceSet.
+                type: object
+                properties:
+                  resources:
+                    type: array
+                    default: []
+                    items:
+                      type: object
+                      oneOf:
+                        - required: [ configMap ]
+                        - required: [ fileSystem ]
+                      properties:
+                        configMap:
+                          description: The configMap resourceSet loads the Kubernetes manifests from a Kubernetes configMap.
+                          type: object
+                          properties:
+                            name:
+                              description: The name of the configMap
+                              type: string
+                            files:
+                              description: (Optional) Specifies which files from the configMap should be loaded. If this field is not set, all files are loaded.
+                              type: array
+                              items:
+                                type: string
+                        fileSystem:
+                          description: The fileSystem resourceSet loads the Kubernetes manifests from the filesystem.
+                          type: object
+                          properties:
+                            path:
+                              description: The path to the folder that contains the Kubernetes manifest files.
+                              type: string
+                            files:
+                              description: (Optional) Specifies which files from the file system should be loaded. If this field is not set, all files are loaded.
+                              type: array
+                              items:
+                                type: string
+              resourceTypes:
+                description: A list of resource types that can be scaled for this `benchmark` resource. For each resource type the concrete values are defined in the `execution` object.
+                type: array
+                minItems: 1
+                items:
+                  type: object
+                  required: ["typeName", "patchers"]
+                  properties:
+                    typeName:
+                      description: Name of the resource type.
+                      type: string
+                    patchers:
+                      description: List of patchers used to scale this resource type.
+                      type: array
+                      minItems: 1
+                      items:
+                        type: object
+                        required: ["type", "resource"]
+                        properties:
+                          type:
+                            description: Type of the patcher.
+                            type: string
+                            default: ""
+                          resource:
+                            description: Specifies the Kubernetes resource to be patched.
+                            type: string
+                            default: ""
+                          properties:
+                            description: (Optional) Patcher specific additional arguments.
+                            type: object
+                            additionalProperties: true
+                            x-kubernetes-map-type: "granular"
+                            default: {}
+              loadTypes:
+                description: A list of load types that can be scaled for this benchmark. For each load type the concrete values are defined in the execution object.
+                type: array
+                minItems: 1
+                items:
+                  type: object
+                  required: ["typeName","patchers"]
+                  properties:
+                    typeName:
+                      description: Name of the load type.
+                      type: string
+                    patchers:
+                      description: List of patchers used to scale this resource type.
+                      type: array
+                      minItems: 1
+                      items:
+                        type: object
+                        required: ["type", "resource"]
+                        properties:
+                          type:
+                            description: Type of the Patcher.
+                            type: string
+                            default: ""
+                          resource:
+                            description: Specifies the Kubernetes resource to be patched.
+                            type: string
+                            default: ""
+                          properties:
+                            description: (Optional) Patcher specific additional arguments.
+                            type: object
+                            additionalProperties: true
+                            x-kubernetes-map-type: "granular"
+                            default: {}
+              kafkaConfig:
+                description: Contains the Kafka configuration.
+                type: object
+                required: ["bootstrapServer", "topics"]
+                properties:
+                  bootstrapServer:
+                    description: The bootstrap servers connection string.
+                    type: string
+                  topics:
+                    description: List of topics to be created for each experiment. Alternatively, Theodolite offers the possibility to remove certain topics after each experiment.
+                    type: array
+                    minItems: 1
+                    items:
+                      type: object
+                      required: ["name"]
+                      properties:
+                        name:
+                          description: The name of the topic.
+                          type: string
+                          default: ""
+                        numPartitions:
+                          description: The number of partitions of the topic.
+                          type: integer
+                          default: 0
+                        replicationFactor:
+                          description: The replication factor of the topic.
+                          type: integer
+                          default: 0
+                        removeOnly:
+                          description: Determines if this topic should only be deleted after each experiment. For removeOnly topics, the name can be a RegEx describing the topic.
+                          type: boolean
+                          default: false
+          status:
+            type: object
+            properties:
+              resourceSetsState:
+                description: The status of a Benchmark indicates whether all resources are available to start the benchmark or not.
+                type: string
+    additionalPrinterColumns:
+    - name: Age
+      type: date
+      jsonPath: .metadata.creationTimestamp
+    - name: STATUS
+      type: string
+      description: The status of a Benchmark indicates whether all resources are available to start the benchmark or not.
+      jsonPath: .status.resourceSetsState
+    subresources:
+      status: {}
+  scope: Namespaced
\ No newline at end of file
diff --git a/theodolite/crd/crd-execution.yaml b/theodolite/crd/crd-execution.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d9cd41903bb2fdc18bd6640bdbe2eb764b2106ab
--- /dev/null
+++ b/theodolite/crd/crd-execution.yaml
@@ -0,0 +1,152 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: executions.theodolite.com
+spec:
+  group: theodolite.com
+  names:
+    kind: execution
+    plural: executions
+    shortNames:
+      - exec
+  versions:
+  - name: v1
+    served: true
+    storage: true
+    schema:
+      openAPIV3Schema:
+        type: object
+        required: ["spec"]
+        properties:
+          spec:
+            type: object
+            required: ["benchmark", "load", "resources", "slos", "execution", "configOverrides"]
+            properties:
+              name:
+                description: This field exists only for technical reasons and should not be set by the user. The value of the field will be overwritten.
+                type: string
+                default: ""
+              benchmark:
+                description: The name of the benchmark this execution is referring to.
+                type: string
+              load: # definition of the load dimension
+                description: Specifies the load values that are benchmarked.
+                type: object
+                required: ["loadType", "loadValues"]
+                properties:
+                  loadType:
+                    description: The type of the load. It must match one of the load types specified in the referenced benchmark.
+                    type: string
+                  loadValues:
+                    description: List of load values for the specified load type.
+                    type: array
+                    items:
+                      type: integer
+              resources: # definition of the resource dimension
+                description: Specifies the scaling resource that is benchmarked.
+                type: object
+                required: ["resourceType", "resourceValues"]
+                properties:
+                  resourceType:
+                    description: The type of the resource. It must match one of the resource types specified in the referenced benchmark.
+                    type: string
+                  resourceValues:
+                    description: List of resource values for the specified resource type.
+                    type: array
+                    items:
+                      type: integer
+              slos: # def of service level objectives
+                description: List of service level objectives (SLOs) used to evaluate the experiments.
+                type: array
+                items:
+                  type: object
+                  required: ["sloType", "prometheusUrl", "offset"]
+                  properties:
+                    sloType:
+                      description: The type of the SLO. It must match 'lag trend'.
+                      type: string
+                    prometheusUrl:
+                      description: Connection string for Prometheus.
+                      type: string
+                    offset:
+                      description: Hours by which the start and end timestamp will be shifted (for different timezones).
+                      type: integer
+                    properties:
+                        description: (Optional) SLO specific additional arguments.
+                        type: object
+                        additionalProperties: true
+                        x-kubernetes-map-type: "granular"
+                        default: {}
+              execution: # def execution config
+                description: Defines the overall parameter for the execution.
+                type: object
+                required: ["strategy", "duration", "repetitions", "restrictions"]
+                properties:
+                  strategy:
+                    description: Defines the search strategy used for the execution, either 'LinearSearch' or 'BinarySearch'.
+                    type: string
+                  duration:
+                    description: Defines the duration of each experiment in seconds.
+                    type: integer
+                  repetitions:
+                    description: Number of repetitions for each experiment.
+                    type: integer
+                  loadGenerationDelay:
+                    description: Seconds to wait between the start of the SUT and the load generator.
+                    type: integer
+                  restrictions:
+                    description: List of restriction strategies used to delimit the search space.
+                    type: array
+                    items:
+                      type: string
+              configOverrides:
+                description:  List of patchers that are used to override existing configurations.
+                type: array
+                items:
+                  type: object
+                  properties:
+                    patcher:
+                      description: Patcher used to patch a resource
+                      type: object
+                      required: ["type", "resource"]
+                      properties:
+                        type:
+                          description: Type of the Patcher.
+                          type: string
+                          default: ""
+                        resource:
+                          description: Specifies the Kubernetes resource to be patched.
+                          type: string
+                          default: ""
+                        properties:
+                          description: (Optional) Patcher specific additional arguments.
+                          type: object
+                          additionalProperties: true
+                          x-kubernetes-map-type: "granular"
+                          default: {}
+                    value:
+                      type: string
+          status:
+            type: object
+            properties:
+              executionState:
+                description: ""
+                type: string
+              executionDuration:
+                description: "Duration of the execution in seconds"
+                type: string
+    additionalPrinterColumns:
+    - name: STATUS
+      type: string
+      description: State of the execution
+      jsonPath: .status.executionState
+    - name: Duration
+      type: string
+      description: Duration of the execution
+      jsonPath: .status.executionDuration
+    - name: Age
+      type: date
+      jsonPath: .metadata.creationTimestamp
+    subresources:
+      status: {}
+  scope: Namespaced
diff --git a/theodolite/examples/operator/example-benchmark.yaml b/theodolite/examples/operator/example-benchmark.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3452fff9c729d680890d6eafa685ce2f13b098d6
--- /dev/null
+++ b/theodolite/examples/operator/example-benchmark.yaml
@@ -0,0 +1,43 @@
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: uc1-kstreams
+spec:
+  sut:
+    resources:
+      - configMap:
+         name: "example-configmap"
+         files:
+           - "uc1-kstreams-deployment.yaml"
+  loadGenerator:
+    resources:
+      - configMap:
+         name: "example-configmap"
+         files:
+            - uc1-load-generator-service.yaml
+            - uc1-load-generator-deployment.yaml
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "uc1-kstreams-deployment.yaml"
+  loadTypes:
+    - typeName: "NumSensors"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc1-load-generator-deployment.yaml"
+          properties:
+            variableName: "NUM_SENSORS"
+            container: "workload-generator"
+        - type: "NumSensorsLoadGeneratorReplicaPatcher"
+          resource: "uc1-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "15000"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "theodolite-.*"
+        removeOnly: True
\ No newline at end of file
diff --git a/theodolite/examples/operator/example-configmap.yaml b/theodolite/examples/operator/example-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..210ce32d3fc0f75b9ffce874d1fa0a1ea9bdc3cd
--- /dev/null
+++ b/theodolite/examples/operator/example-configmap.yaml
@@ -0,0 +1,87 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: example-configmap
+data:
+  uc1-kstreams-deployment.yaml: |-
+    apiVersion: apps/v1
+    kind: Deployment
+    metadata:
+      name: titan-ccp-aggregation
+    spec:
+      selector:
+        matchLabels:
+          app: titan-ccp-aggregation
+      replicas: 1
+      template:
+        metadata:
+          labels:
+            app: titan-ccp-aggregation
+        spec:
+          terminationGracePeriodSeconds: 0
+          containers:
+            - name: uc-application
+              image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+              env:
+                - name: KAFKA_BOOTSTRAP_SERVERS
+                  value: "theodolite-cp-kafka:9092"
+                - name: SCHEMA_REGISTRY_URL
+                  value: "http://theodolite-cp-schema-registry:8081"
+                - name: JAVA_OPTS
+                  value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+                - name: COMMIT_INTERVAL_MS # Set as default for the applications
+                  value: "100"
+              resources:
+                limits:
+                  memory: 4Gi
+                  cpu: 1000m
+  uc1-load-generator-deployment.yaml: |
+    apiVersion: apps/v1
+    kind: Deployment
+    metadata:
+      name: titan-ccp-load-generator
+    spec:
+      selector:
+        matchLabels:
+          app: titan-ccp-load-generator
+      replicas: 1
+      template:
+        metadata:
+          labels:
+            app: titan-ccp-load-generator
+        spec:
+          terminationGracePeriodSeconds: 0
+          containers:
+            - name: workload-generator
+              image: ghcr.io/cau-se/theodolite-uc1-workload-generator:latest
+              ports:
+                - containerPort: 5701
+                  name: coordination
+              env:
+                - name: KUBERNETES_NAMESPACE
+                  valueFrom:
+                    fieldRef:
+                      fieldPath: metadata.namespace
+                - name: KUBERNETES_DNS_NAME
+                  value: "titan-ccp-load-generator.$(KUBERNETES_NAMESPACE).svc.cluster.local"
+                - name: KAFKA_BOOTSTRAP_SERVERS
+                  value: "theodolite-cp-kafka:9092"
+                - name: SCHEMA_REGISTRY_URL
+                  value: "http://theodolite-cp-schema-registry:8081"
+  uc1-load-generator-service.yaml: |
+    apiVersion: v1
+    kind: Service
+    metadata:
+      name: titan-ccp-load-generator
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      type: ClusterIP
+      clusterIP: None
+      selector:
+        app: titan-ccp-load-generator
+      ports:
+        - name: coordination
+          port: 5701
+          targetPort: 5701
+          protocol: TCP
\ No newline at end of file
diff --git a/theodolite/examples/operator/example-execution.yaml b/theodolite/examples/operator/example-execution.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e2efb6e9a2bb6c08354b57a83506a601ac0ed96e
--- /dev/null
+++ b/theodolite/examples/operator/example-execution.yaml
@@ -0,0 +1,58 @@
+apiVersion: theodolite.com/v1
+kind: execution
+metadata:
+  name: theodolite-example-execution
+spec:
+  benchmark: "uc1-kstreams"
+  load:
+    loadType: "NumSensors"
+    loadValues: [25000, 50000, 75000, 100000, 125000, 150000]
+  resources:
+    resourceType: "Instances"
+    resourceValues: [1, 2, 3, 4, 5]
+  slos:
+    - sloType: "lag trend"
+      prometheusUrl: "http://prometheus-operated:9090"
+      offset: 0
+      properties:
+        threshold: 2000
+        externalSloUrl: "http://localhost:80/evaluate-slope"
+        warmup: 60 # in seconds
+  execution:
+    strategy: "LinearSearch"
+    duration: 300 # in seconds
+    repetitions: 1
+    loadGenerationDelay: 30 # in seconds
+    restrictions:
+      - "LowerBound"
+  configOverrides: []
+  # - patcher:
+  #     type: "NodeSelectorPatcher"
+  #     resource: "uc1-load-generator-deployment.yaml"
+  #     properties:
+  #       variableName: "env"
+  #   value: "prod"
+  # - patcher:
+  #     type: "NodeSelectorPatcher"
+  #     resource: "uc1-kstreams-deployment.yaml"
+  #     properties:
+  #       variableName: "env"
+  #   value: "prod"
+  # - patcher:
+  #     type: "ResourceLimitPatcher"
+  #     resource: "uc1-kstreams-deployment.yaml"
+  #     properties:
+  #       container: "uc-application"
+  #       limitedResource: "cpu"
+  #   value: "1000m"
+  # - patcher:
+  #     type: "ResourceLimitPatcher"
+  #     resource: "uc1-kstreams-deployment.yaml"
+  #     properties:
+  #       container: "uc-application"
+  #       limitedResource: "memory"
+  #   value: "2Gi"
+  # - patcher:
+  #     type: "SchedulerNamePatcher"
+  #     resource: "uc1-kstreams-deployment.yaml"
+  #   value: "random-scheduler"
diff --git a/theodolite/examples/resources/uc1-kstreams-deployment.yaml b/theodolite/examples/resources/uc1-kstreams-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fdd1ff867ac83beb10856baec53569c88169232e
--- /dev/null
+++ b/theodolite/examples/resources/uc1-kstreams-deployment.yaml
@@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
\ No newline at end of file
diff --git a/theodolite/examples/resources/uc1-load-generator-deployment.yaml b/theodolite/examples/resources/uc1-load-generator-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9f9ccc6ae39407bb1f027e1e23cb152944b869e0
--- /dev/null
+++ b/theodolite/examples/resources/uc1-load-generator-deployment.yaml
@@ -0,0 +1,32 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-load-generator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: workload-generator
+          image: ghcr.io/cau-se/theodolite-uc1-workload-generator:latest
+          ports:
+            - containerPort: 5701
+              name: coordination
+          env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: KUBERNETES_DNS_NAME
+              value: "titan-ccp-load-generator.$(KUBERNETES_NAMESPACE).svc.cluster.local"
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
diff --git a/theodolite/examples/resources/uc1-load-generator-service.yaml b/theodolite/examples/resources/uc1-load-generator-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f8b26b3f6dece427f9c1ad4db94e351b042749b3
--- /dev/null
+++ b/theodolite/examples/resources/uc1-load-generator-service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-load-generator
+  labels:
+    app: titan-ccp-load-generator
+spec:
+  type: ClusterIP
+  clusterIP: None
+  selector:
+    app: titan-ccp-load-generator
+  ports:
+    - name: coordination
+      port: 5701
+      targetPort: 5701
+      protocol: TCP
diff --git a/theodolite/examples/standalone/example-benchmark.yaml b/theodolite/examples/standalone/example-benchmark.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0d5df81b163c1285f12971d08518dddf4b451d0f
--- /dev/null
+++ b/theodolite/examples/standalone/example-benchmark.yaml
@@ -0,0 +1,40 @@
+name: "uc1-kstreams"
+  infrastructure: []
+  sut:
+    resources:
+      - configMap:
+         name: "example-configmap"
+         files:
+           - "uc1-kstreams-deployment.yaml"
+  loadGenerator:
+    resources:
+      - configMap:
+         name: "example-configmap"
+         files:
+            - uc1-load-generator-service.yaml
+            - uc1-load-generator-deployment.yaml
+resourceTypes:
+  - typeName: "Instances"
+    patchers:
+      - type: "ReplicaPatcher"
+        resource: "uc1-kstreams-deployment.yaml"
+loadTypes:
+  - typeName: "NumSensors"
+    patchers:
+      - type: "EnvVarPatcher"
+        resource: "uc1-load-generator-deployment.yaml"
+        properties:
+          variableName: "NUM_SENSORS"
+          container: "workload-generator"
+      - type: "NumSensorsLoadGeneratorReplicaPatcher"
+        resource: "uc1-load-generator-deployment.yaml"
+        properties:
+          loadGenMaxRecords: "15000"
+kafkaConfig:
+  bootstrapServer: "theodolite-cp-kafka:9092"
+  topics:
+    - name: "input"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "theodolite-.*"
+      removeOnly: True
diff --git a/theodolite/examples/standalone/example-execution.yaml b/theodolite/examples/standalone/example-execution.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6e649df957fe1d5dd962fdd5fe5152808e722de6
--- /dev/null
+++ b/theodolite/examples/standalone/example-execution.yaml
@@ -0,0 +1,24 @@
+name: example-execution
+benchmark: "uc1-kstreams"
+load:
+  loadType: "NumSensors"
+  loadValues: [25000, 50000, 75000, 100000, 125000, 150000]
+resources:
+  resourceType: "Instances"
+  resourceValues: [1, 2, 3, 4, 5]
+slos:
+  - sloType: "lag trend"
+    prometheusUrl: "http://prometheus-operated:9090"
+    offset: 0
+    properties:
+      threshold: 2000
+      externalSloUrl: "http://localhost:80/evaluate-slope"
+      warmup: 60 # in seconds
+execution:
+  strategy: "LinearSearch"
+  duration: 300 # in seconds
+  repetitions: 1
+  loadGenerationDelay: 30 # in seconds, optional field, default is 0 seconds
+  restrictions:
+    - "LowerBound"
+configOverrides: []
diff --git a/theodolite/gradle.properties b/theodolite/gradle.properties
new file mode 100644
index 0000000000000000000000000000000000000000..d7e4187c25e76dfb440650274b2d383f75a32242
--- /dev/null
+++ b/theodolite/gradle.properties
@@ -0,0 +1,8 @@
+#Gradle properties
+quarkusPluginId=io.quarkus
+quarkusPluginVersion=1.10.3.Final
+quarkusPlatformGroupId=io.quarkus
+quarkusPlatformArtifactId=quarkus-universe-bom
+quarkusPlatformVersion=1.10.3.Final
+
+org.gradle.logging.level=INFO
\ No newline at end of file
diff --git a/theodolite/gradle/wrapper/gradle-wrapper.jar b/theodolite/gradle/wrapper/gradle-wrapper.jar
new file mode 100644
index 0000000000000000000000000000000000000000..62d4c053550b91381bbd28b1afc82d634bf73a8a
Binary files /dev/null and b/theodolite/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/theodolite/gradle/wrapper/gradle-wrapper.properties b/theodolite/gradle/wrapper/gradle-wrapper.properties
new file mode 100644
index 0000000000000000000000000000000000000000..bb8b2fc26b2e572c79d7212a4f6f11057c6787f7
--- /dev/null
+++ b/theodolite/gradle/wrapper/gradle-wrapper.properties
@@ -0,0 +1,5 @@
+distributionBase=GRADLE_USER_HOME
+distributionPath=wrapper/dists
+distributionUrl=https\://services.gradle.org/distributions/gradle-6.5.1-bin.zip
+zipStoreBase=GRADLE_USER_HOME
+zipStorePath=wrapper/dists
diff --git a/theodolite/gradlew b/theodolite/gradlew
new file mode 100755
index 0000000000000000000000000000000000000000..fbd7c515832dab7b01092e80db76e5e03fe32d29
--- /dev/null
+++ b/theodolite/gradlew
@@ -0,0 +1,185 @@
+#!/usr/bin/env sh
+
+#
+# Copyright 2015 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+##############################################################################
+##
+##  Gradle start up script for UN*X
+##
+##############################################################################
+
+# Attempt to set APP_HOME
+# Resolve links: $0 may be a link
+PRG="$0"
+# Need this for relative symlinks.
+while [ -h "$PRG" ] ; do
+    ls=`ls -ld "$PRG"`
+    link=`expr "$ls" : '.*-> \(.*\)$'`
+    if expr "$link" : '/.*' > /dev/null; then
+        PRG="$link"
+    else
+        PRG=`dirname "$PRG"`"/$link"
+    fi
+done
+SAVED="`pwd`"
+cd "`dirname \"$PRG\"`/" >/dev/null
+APP_HOME="`pwd -P`"
+cd "$SAVED" >/dev/null
+
+APP_NAME="Gradle"
+APP_BASE_NAME=`basename "$0"`
+
+# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
+
+# Use the maximum available, or set MAX_FD != -1 to use that value.
+MAX_FD="maximum"
+
+warn () {
+    echo "$*"
+}
+
+die () {
+    echo
+    echo "$*"
+    echo
+    exit 1
+}
+
+# OS specific support (must be 'true' or 'false').
+cygwin=false
+msys=false
+darwin=false
+nonstop=false
+case "`uname`" in
+  CYGWIN* )
+    cygwin=true
+    ;;
+  Darwin* )
+    darwin=true
+    ;;
+  MINGW* )
+    msys=true
+    ;;
+  NONSTOP* )
+    nonstop=true
+    ;;
+esac
+
+CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+
+
+# Determine the Java command to use to start the JVM.
+if [ -n "$JAVA_HOME" ] ; then
+    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+        # IBM's JDK on AIX uses strange locations for the executables
+        JAVACMD="$JAVA_HOME/jre/sh/java"
+    else
+        JAVACMD="$JAVA_HOME/bin/java"
+    fi
+    if [ ! -x "$JAVACMD" ] ; then
+        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+    fi
+else
+    JAVACMD="java"
+    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+fi
+
+# Increase the maximum file descriptors if we can.
+if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
+    MAX_FD_LIMIT=`ulimit -H -n`
+    if [ $? -eq 0 ] ; then
+        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
+            MAX_FD="$MAX_FD_LIMIT"
+        fi
+        ulimit -n $MAX_FD
+        if [ $? -ne 0 ] ; then
+            warn "Could not set maximum file descriptor limit: $MAX_FD"
+        fi
+    else
+        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
+    fi
+fi
+
+# For Darwin, add options to specify how the application appears in the dock
+if $darwin; then
+    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
+fi
+
+# For Cygwin or MSYS, switch paths to Windows format before running java
+if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then
+    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
+    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
+    
+    JAVACMD=`cygpath --unix "$JAVACMD"`
+
+    # We build the pattern for arguments to be converted via cygpath
+    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
+    SEP=""
+    for dir in $ROOTDIRSRAW ; do
+        ROOTDIRS="$ROOTDIRS$SEP$dir"
+        SEP="|"
+    done
+    OURCYGPATTERN="(^($ROOTDIRS))"
+    # Add a user-defined pattern to the cygpath arguments
+    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
+        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
+    fi
+    # Now convert the arguments - kludge to limit ourselves to /bin/sh
+    i=0
+    for arg in "$@" ; do
+        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
+        CHECK2=`echo "$arg"|egrep -c "^-"`                                 ### Determine if an option
+
+        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then                    ### Added a condition
+            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
+        else
+            eval `echo args$i`="\"$arg\""
+        fi
+        i=`expr $i + 1`
+    done
+    case $i in
+        0) set -- ;;
+        1) set -- "$args0" ;;
+        2) set -- "$args0" "$args1" ;;
+        3) set -- "$args0" "$args1" "$args2" ;;
+        4) set -- "$args0" "$args1" "$args2" "$args3" ;;
+        5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
+        6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
+        7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
+        8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
+        9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
+    esac
+fi
+
+# Escape application args
+save () {
+    for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
+    echo " "
+}
+APP_ARGS=`save "$@"`
+
+# Collect all arguments for the java command, following the shell quoting and substitution rules
+eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
+
+exec "$JAVACMD" "$@"
diff --git a/benchmarks/gradlew.bat b/theodolite/gradlew.bat
old mode 100644
new mode 100755
similarity index 70%
rename from benchmarks/gradlew.bat
rename to theodolite/gradlew.bat
index 0f8d5937c4ad18feb44a19e55ad1e37cc159260f..a9f778a7a964b6f01c904ee667903f005d6df556
--- a/benchmarks/gradlew.bat
+++ b/theodolite/gradlew.bat
@@ -1,3 +1,19 @@
+@rem
+@rem Copyright 2015 the original author or authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem      https://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+
 @if "%DEBUG%" == "" @echo off
 @rem ##########################################################################
 @rem
@@ -13,8 +29,11 @@ if "%DIRNAME%" == "" set DIRNAME=.
 set APP_BASE_NAME=%~n0
 set APP_HOME=%DIRNAME%
 
+@rem Resolve any "." and ".." in APP_HOME to make it shorter.
+for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
+
 @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
-set DEFAULT_JVM_OPTS="-Xmx64m"
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
 
 @rem Find java.exe
 if defined JAVA_HOME goto findJavaFromJavaHome
@@ -65,6 +84,7 @@ set CMD_LINE_ARGS=%*
 
 set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
 
+
 @rem Execute Gradle
 "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
 
diff --git a/theodolite/settings.gradle b/theodolite/settings.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..a726b4afa4cfdc7433dbd408808ab0fd6a766d6a
--- /dev/null
+++ b/theodolite/settings.gradle
@@ -0,0 +1,11 @@
+pluginManagement {
+    repositories {
+        mavenLocal()
+        mavenCentral()
+        gradlePluginPortal()
+    }
+    plugins {
+        id "${quarkusPluginId}" version "${quarkusPluginVersion}"
+    }
+}
+rootProject.name='theodolite'
diff --git a/theodolite/src/main/docker/Dockerfile.fast-jar b/theodolite/src/main/docker/Dockerfile.fast-jar
new file mode 100644
index 0000000000000000000000000000000000000000..16853dd8f064565ae017bee9dae3597b63085006
--- /dev/null
+++ b/theodolite/src/main/docker/Dockerfile.fast-jar
@@ -0,0 +1,54 @@
+####
+# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
+#
+# Before building the container image run:
+#
+# ./gradlew build -Dquarkus.package.type=fast-jar
+#
+# Then, build the image with:
+#
+# docker build -f src/main/docker/Dockerfile.fast-jar -t quarkus/theodolite-fast-jar .
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 quarkus/theodolite-fast-jar
+#
+# If you want to include the debug port in your docker image,
+# you will have to expose the debug port (default 5005) like this: EXPOSE 8080 5005
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 -p 5005:5005 -e JAVA_ENABLE_DEBUG="true" quarkus/theodolite-fast-jar
+#
+###
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3 
+
+ARG JAVA_PACKAGE=java-11-openjdk-headless
+ARG RUN_JAVA_VERSION=1.3.8
+ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en'
+# Install java and the run-java script
+# Also set up permissions for user `1001`
+RUN microdnf install curl ca-certificates ${JAVA_PACKAGE} \
+    && microdnf update \
+    && microdnf clean all \
+    && mkdir /deployments \
+    && chown 1001 /deployments \
+    && chmod "g+rwX" /deployments \
+    && chown 1001:root /deployments \
+    && curl https://repo1.maven.org/maven2/io/fabric8/run-java-sh/${RUN_JAVA_VERSION}/run-java-sh-${RUN_JAVA_VERSION}-sh.sh -o /deployments/run-java.sh \
+    && chown 1001 /deployments/run-java.sh \
+    && chmod 540 /deployments/run-java.sh \
+    && echo "securerandom.source=file:/dev/urandom" >> /etc/alternatives/jre/lib/security/java.security
+
+# Configure the JAVA_OPTIONS, you can add -XshowSettings:vm to also display the heap size.
+ENV JAVA_OPTIONS="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
+# We make four distinct layers so if there are application changes the library layers can be re-used
+COPY --chown=1001 build/quarkus-app/lib/ /deployments/lib/
+COPY --chown=1001 build/quarkus-app/*.jar /deployments/
+COPY --chown=1001 build/quarkus-app/app/ /deployments/app/
+COPY --chown=1001 build/quarkus-app/quarkus/ /deployments/quarkus/
+
+EXPOSE 8080
+USER 1001
+
+ENTRYPOINT [ "/deployments/run-java.sh" ]
diff --git a/theodolite/src/main/docker/Dockerfile.jvm b/theodolite/src/main/docker/Dockerfile.jvm
new file mode 100644
index 0000000000000000000000000000000000000000..4d51240e0225bb571cc4a625e40c9ec76fd8f10d
--- /dev/null
+++ b/theodolite/src/main/docker/Dockerfile.jvm
@@ -0,0 +1,51 @@
+####
+# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
+#
+# Before building the container image run:
+#
+# ./gradlew build
+#
+# Then, build the image with:
+#
+# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/theodolite-jvm .
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 quarkus/theodolite-jvm
+#
+# If you want to include the debug port in your docker image,
+# you will have to expose the debug port (default 5005) like this: EXPOSE 8080 5005
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 -p 5005:5005 -e JAVA_ENABLE_DEBUG="true" quarkus/theodolite-jvm
+#
+###
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3 
+
+ARG JAVA_PACKAGE=java-11-openjdk-headless
+ARG RUN_JAVA_VERSION=1.3.8
+ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en'
+# Install java and the run-java script
+# Also set up permissions for user `1001`
+RUN microdnf install curl ca-certificates ${JAVA_PACKAGE} \
+    && microdnf update \
+    && microdnf clean all \
+    && mkdir /deployments \
+    && chown 1001 /deployments \
+    && chmod "g+rwX" /deployments \
+    && chown 1001:root /deployments \
+    && curl https://repo1.maven.org/maven2/io/fabric8/run-java-sh/${RUN_JAVA_VERSION}/run-java-sh-${RUN_JAVA_VERSION}-sh.sh -o /deployments/run-java.sh \
+    && chown 1001 /deployments/run-java.sh \
+    && chmod 540 /deployments/run-java.sh \
+    && echo "securerandom.source=file:/dev/urandom" >> /etc/alternatives/jre/lib/security/java.security
+
+# Configure the JAVA_OPTIONS, you can add -XshowSettings:vm to also display the heap size.
+ENV JAVA_OPTIONS="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
+COPY build/lib/* /deployments/lib/
+COPY build/*-runner.jar /deployments/app.jar
+
+EXPOSE 8080
+USER 1001
+
+ENTRYPOINT [ "/deployments/run-java.sh" ]
diff --git a/theodolite/src/main/docker/Dockerfile.native b/theodolite/src/main/docker/Dockerfile.native
new file mode 100644
index 0000000000000000000000000000000000000000..95ef4fb51d7dc1ac520fb4c5a9af1b2d0a32fd09
--- /dev/null
+++ b/theodolite/src/main/docker/Dockerfile.native
@@ -0,0 +1,27 @@
+####
+# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode
+#
+# Before building the container image run:
+#
+# ./gradlew build -Dquarkus.package.type=native
+#
+# Then, build the image with:
+#
+# docker build -f src/main/docker/Dockerfile.native -t quarkus/theodolite .
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 quarkus/theodolite
+#
+###
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3
+WORKDIR /deployments
+RUN chown 1001 /deployments \
+    && chmod "g+rwX" /deployments \
+    && chown 1001:root /deployments
+COPY --chown=1001:root build/*-runner /deployments/application
+
+EXPOSE 8080
+USER 1001
+
+CMD ["./application", "-Dquarkus.http.host=0.0.0.0"]
diff --git a/theodolite/src/main/kotlin/theodolite/benchmark/Benchmark.kt b/theodolite/src/main/kotlin/theodolite/benchmark/Benchmark.kt
new file mode 100644
index 0000000000000000000000000000000000000000..cf2fac7337d79c1c5daf2b0fac070200cf27f9a5
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/benchmark/Benchmark.kt
@@ -0,0 +1,36 @@
+package theodolite.benchmark
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+import theodolite.util.ConfigurationOverride
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+/**
+ * A Benchmark contains:
+ * - The [Resource]s that can be scaled for the benchmark.
+ * - The [LoadDimension]s that can be scaled for the benchmark.
+ * - additional [ConfigurationOverride]s.
+ */
+@RegisterForReflection
+interface Benchmark {
+
+    fun setupInfrastructure()
+    fun teardownInfrastructure()
+
+    /**
+     * Builds a Deployment that can be deployed.
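+     * @param load the concrete load that is benchmarked in this experiment.
+     * @param res the concrete resource that is scaled for this experiment.
+     * @param configurationOverrides additional configuration overrides applied to the resources.
+     * @param loadGenerationDelay seconds to wait between the start of the SUT and the load generator.
+     * @param afterTeardownDelay seconds to wait after the teardown of the deployment.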
+     * @return a BenchmarkDeployment.
+     */
+    fun buildDeployment(
+        load: LoadDimension,
+        res: Resource,
+        configurationOverrides: List<ConfigurationOverride?>,
+        loadGenerationDelay: Long,
+        afterTeardownDelay: Long
+    ): BenchmarkDeployment
+}
diff --git a/theodolite/src/main/kotlin/theodolite/benchmark/BenchmarkDeployment.kt b/theodolite/src/main/kotlin/theodolite/benchmark/BenchmarkDeployment.kt
new file mode 100644
index 0000000000000000000000000000000000000000..fd01ecd986775ef704949743fef0d19f5492e9a6
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/benchmark/BenchmarkDeployment.kt
@@ -0,0 +1,28 @@
+package theodolite.benchmark
+
+/**
+ *  A BenchmarkDeployment contains the necessary infrastructure to execute a benchmark.
+ *  Therefore it has the capabilities to set up the deployment of a benchmark and to tear it down.
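+ *
+ *  A typical lifecycle, as a rough sketch (the `benchmark`, `load`, `res`, and `overrides`
+ *  values are assumed to be provided by the surrounding execution logic):
+ *  ```
+ *  val deployment = benchmark.buildDeployment(load, res, overrides, 30L, 5L)
+ *  deployment.setup()    // create topics and deploy the resources
+ *  // ... run and measure the experiment ...
+ *  deployment.teardown() // remove the deployed resources and topics again
+ *  ```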
+ */
+interface BenchmarkDeployment {
+
+    /**
+     * Sets up a benchmark. This method is responsible for deploying the resources of a benchmark.
+     */
+    fun setup()
+
+    /**
+     *  Tears down a benchmark. This method is responsible for deleting the deployed
+     *  resources and for resetting the used infrastructure.
+     */
+    fun teardown()
+}
diff --git a/theodolite/src/main/kotlin/theodolite/benchmark/BenchmarkExecution.kt b/theodolite/src/main/kotlin/theodolite/benchmark/BenchmarkExecution.kt
new file mode 100644
index 0000000000000000000000000000000000000000..f2dda487d390c5f771e4f47c0f9c7ebf2cf971e7
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/benchmark/BenchmarkExecution.kt
@@ -0,0 +1,102 @@
+package theodolite.benchmark
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.quarkus.runtime.annotations.RegisterForReflection
+import theodolite.util.ConfigurationOverride
+import kotlin.properties.Delegates
+
+/**
+ * This class represents the configuration for an execution of a benchmark.
+ * An example for this is the example-execution.yaml.
+ * A BenchmarkExecution consists of:
+ *  - A [name].
+ *  - The [benchmark] that should be executed.
+ *  - The [load] that should be checked in the benchmark.
+ *  - The [resources] that should be checked in the benchmark.
+ *  - A list of [slos] that are used for the evaluation of the experiments.
+ *  - An [execution] that encapsulates: the strategy, the duration, and the restrictions
+ *  for the execution of the benchmark.
+ *  - [configOverrides] for additional configurations.
+ *  This class is used for parsing (in [theodolite.execution.TheodoliteStandalone]) and
+ *  for deserializing in the [theodolite.execution.operator.TheodoliteOperator].
+ *  @constructor construct an empty BenchmarkExecution.
+ */
+@JsonDeserialize
+@RegisterForReflection
+class BenchmarkExecution : KubernetesResource {
+    var executionId: Int = 0
+    lateinit var name: String
+    lateinit var benchmark: String
+    lateinit var load: LoadDefinition
+    lateinit var resources: ResourceDefinition
+    lateinit var slos: List<Slo>
+    lateinit var execution: Execution
+    lateinit var configOverrides: MutableList<ConfigurationOverride?>
+
+    /**
+     * This execution encapsulates the [strategy], the [duration], the [repetitions], and the [restrictions]
+     *  which are used for the concrete benchmark experiments.
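+     *
+     * A sketch of the corresponding YAML, following the example execution
+     * (`loadGenerationDelay` is optional and defaults to 0):
+     * ```
+     * execution:
+     *   strategy: "LinearSearch"
+     *   duration: 300 # in seconds
+     *   repetitions: 1
+     *   loadGenerationDelay: 30 # in seconds
+     *   restrictions:
+     *     - "LowerBound"
+     * ```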
+     */
+    @JsonDeserialize
+    @RegisterForReflection
+    class Execution : KubernetesResource {
+        lateinit var strategy: String
+        var duration by Delegates.notNull<Long>()
+        var repetitions by Delegates.notNull<Int>()
+        lateinit var restrictions: List<String>
+        var loadGenerationDelay = 0L
+        var afterTeardownDelay = 5L
+    }
+
+    /**
+     * Measurable metric.
+     * [sloType] determines the type of the metric.
+     * It is evaluated using the [theodolite.evaluation.ExternalSloChecker] on data measured by Prometheus.
+     * The evaluation checks if a [threshold] is reached or not.
+     * [offset] determines the shift in hours by which the start and end timestamps should be shifted.
+     * The [warmup] determines after which time the metric should be evaluated to avoid interference from the start-up phase.
+     * The [warmup] time unit depends on the Slo: for the lag trend it is in seconds.
+     */
+    @JsonDeserialize
+    @RegisterForReflection
+    class Slo : KubernetesResource {
+        lateinit var sloType: String
+        lateinit var prometheusUrl: String
+        var offset by Delegates.notNull<Int>()
+        lateinit var properties: MutableMap<String, String>
+    }
+
+    /**
+     * Represents the load that is generated and checked in the benchmark.
+     * It can take any of the [loadValues].
+     */
+    @JsonDeserialize
+    @RegisterForReflection
+    class LoadDefinition : KubernetesResource {
+        lateinit var loadType: String
+        lateinit var loadValues: List<Int>
+    }
+
+    /**
+     * Represents a resource that can be scaled to [resourceValues].
+     */
+    @JsonDeserialize
+    @RegisterForReflection
+    class ResourceDefinition : KubernetesResource {
+        lateinit var resourceType: String
+        lateinit var resourceValues: List<Int>
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/benchmark/ConfigMapResourceSet.kt b/theodolite/src/main/kotlin/theodolite/benchmark/ConfigMapResourceSet.kt
new file mode 100644
index 0000000000000000000000000000000000000000..273a13170e77ae9e2f5f09869ebbc5cc06185715
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/benchmark/ConfigMapResourceSet.kt
@@ -0,0 +1,88 @@
+package theodolite.benchmark
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.client.KubernetesClientException
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.quarkus.runtime.annotations.RegisterForReflection
+import mu.KotlinLogging
+import theodolite.k8s.resourceLoader.K8sResourceLoaderFromString
+import theodolite.util.DeploymentFailedException
+import theodolite.util.YamlParserFromString
+import java.lang.IllegalArgumentException
+import java.lang.IllegalStateException
+
+private val logger = KotlinLogging.logger {}
+
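+/**
+ * A [ResourceSet] that loads Kubernetes manifests from a ConfigMap.
+ *
+ * A rough usage sketch in a benchmark YAML (the ConfigMap name and file list are
+ * placeholders; `files` is optional and defaults to all YAML files of the ConfigMap):
+ * ```
+ * resources:
+ *   - configMap:
+ *       name: "example-configmap"
+ *       files:
+ *         - "uc1-kstreams-deployment.yaml"
+ * ```
+ */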
+@RegisterForReflection
+@JsonDeserialize
+class ConfigMapResourceSet: ResourceSet, KubernetesResource {
+    lateinit var name: String
+    lateinit var files: List<String> // if not set, all YAML files of the ConfigMap are loaded
+
+    @OptIn(ExperimentalStdlibApi::class)
+    override fun getResourceSet(client: NamespacedKubernetesClient): Collection<Pair<String, KubernetesResource>> {
+        val loader = K8sResourceLoaderFromString(client)
+        var resources: Map<String, String>
+
+        try {
+            resources = client
+                .configMaps()
+                .withName(name)
+                .get()
+                .data
+                .filter { it.key.endsWith(".yaml") } // consider only YAML files, ignore other files such as READMEs
+        } catch (e: KubernetesClientException) {
+            throw DeploymentFailedException("can not find or read configmap:  $name", e)
+        } catch (e: IllegalStateException) {
+            throw DeploymentFailedException("can not find configmap or data section is null $name", e)
+        }
+
+        if (::files.isInitialized){
+            resources = resources
+                .filter { files.contains(it.key) }
+
+            if (resources.size != files.size) {
+                throw DeploymentFailedException("Could not find all specified Kubernetes manifest files")
+            }
+        }
+
+        return try {
+            resources
+                .map { Pair(
+                    getKind(resource = it.value),
+                    it) }
+                .map {
+                    Pair(
+                        it.second.key,
+                        loader.loadK8sResource(it.first, it.second.value)) }
+        } catch (e: IllegalArgumentException) {
+            throw  DeploymentFailedException("Can not creat resource set from specified configmap", e)
+        }
+
+    }
+
+    private fun getKind(resource: String): String {
+        val parser = YamlParserFromString()
+        val resourceAsMap = parser.parse(resource, HashMap<String, String>()::class.java)
+
+        return try {
+            resourceAsMap?.get("kind")!!
+        } catch (e: NullPointerException) {
+            throw DeploymentFailedException( "Could not find field kind of Kubernetes resource: ${resourceAsMap?.get("name")}", e)
+        }
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/benchmark/FileSystemResourceSet.kt b/theodolite/src/main/kotlin/theodolite/benchmark/FileSystemResourceSet.kt
new file mode 100644
index 0000000000000000000000000000000000000000..92df1bec3cd6f21b1f830e73b466f70e37a9f4c8
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/benchmark/FileSystemResourceSet.kt
@@ -0,0 +1,78 @@
+package theodolite.benchmark
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.client.DefaultKubernetesClient
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.quarkus.runtime.annotations.RegisterForReflection
+import mu.KotlinLogging
+import theodolite.k8s.resourceLoader.K8sResourceLoaderFromFile
+import theodolite.util.DeploymentFailedException
+import theodolite.util.YamlParserFromFile
+import java.io.File
+import java.io.FileNotFoundException
+import java.lang.IllegalArgumentException
+
+private val logger = KotlinLogging.logger {}
+
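+/**
+ * A [ResourceSet] that loads Kubernetes manifests from the file system.
+ *
+ * A rough usage sketch in a benchmark YAML (the path and file names are placeholders;
+ * `files` is optional and defaults to all YAML files in the directory):
+ * ```
+ * resources:
+ *   - fileSystem:
+ *       path: "./benchmark-resources"
+ *       files:
+ *         - "uc1-kstreams-deployment.yaml"
+ * ```
+ */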
+@RegisterForReflection
+@JsonDeserialize
+class FileSystemResourceSet: ResourceSet, KubernetesResource {
+    lateinit var path: String
+    lateinit var files: List<String>
+
+    override fun getResourceSet(client: NamespacedKubernetesClient): Collection<Pair<String, KubernetesResource>> {
+
+        // if files is set, load only the specified files
+        if(::files.isInitialized){
+            return files
+                    .map { loadSingleResource(resourceURL = it, client = client) }
+        }
+
+        return try {
+            File(path)
+                .list()!!
+                .filter { it.endsWith(".yaml") } // consider only yaml files, e.g. ignore readme files
+                .map {
+                    loadSingleResource(resourceURL = it, client = client)
+                }
+        } catch (e: NullPointerException) {
+            throw  DeploymentFailedException("Could not load files located in $path", e)
+        }
+    }
+
+    private fun loadSingleResource(resourceURL: String, client: NamespacedKubernetesClient): Pair<String, KubernetesResource> {
+        val parser = YamlParserFromFile()
+        val loader = K8sResourceLoaderFromFile(client)
+        val resourcePath = "$path/$resourceURL"
+        lateinit var kind: String
+
+        try {
+            kind = parser.parse(resourcePath, HashMap<String, String>()::class.java)?.get("kind")!!
+        } catch (e: NullPointerException) {
+            throw DeploymentFailedException("Can not get Kind from resource $resourcePath", e)
+        } catch (e: FileNotFoundException) {
+            throw DeploymentFailedException("File $resourcePath not found", e)
+        }
+
+        return try {
+            val k8sResource = loader.loadK8sResource(kind, resourcePath)
+            Pair(resourceURL, k8sResource)
+        } catch (e: IllegalArgumentException) {
+            throw DeploymentFailedException("Could not load resource: $resourcePath", e)
+        }
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/benchmark/KubernetesBenchmark.kt b/theodolite/src/main/kotlin/theodolite/benchmark/KubernetesBenchmark.kt
new file mode 100644
index 0000000000000000000000000000000000000000..0b81f8701f92a95662efef6e0d58839c9a2f6f3b
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/benchmark/KubernetesBenchmark.kt
@@ -0,0 +1,124 @@
+package theodolite.benchmark
+
+import com.fasterxml.jackson.annotation.JsonInclude
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.client.DefaultKubernetesClient
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.quarkus.runtime.annotations.RegisterForReflection
+import mu.KotlinLogging
+import theodolite.k8s.K8sManager
+import theodolite.k8s.resourceLoader.K8sResourceLoader
+import theodolite.patcher.PatcherFactory
+import theodolite.util.*
+
+
+private val logger = KotlinLogging.logger {}
+
+private val DEFAULT_NAMESPACE = "default"
+private val DEFAULT_THEODOLITE_APP_RESOURCES = "./benchmark-resources"
+
+/**
+ * Represents a benchmark in Kubernetes. An example for this is the example-benchmark.yaml.
+ * It contains:
+ * - the [name] of the benchmark,
+ * - the [infrastructure] resources that are deployed and removed by [setupInfrastructure] and [teardownInfrastructure],
+ * - the [sut] resources that have to be deployed for the system under test,
+ * - the [loadGenerator] resources that generate the load,
+ * - the [resourceTypes] of scaling resources,
+ * - the [loadTypes] of loads that can be scaled for the benchmark,
+ * - the [kafkaConfig] for the [theodolite.k8s.TopicManager],
+ * - the [namespace] for the client.
+ *
+ *  This class is used for the parsing (in the [theodolite.execution.TheodoliteStandalone]) and
+ *  for the deserializing in the [theodolite.execution.operator.TheodoliteOperator].
+ * @constructor construct an empty Benchmark.
+ */
+@JsonDeserialize
+@RegisterForReflection
+class KubernetesBenchmark : KubernetesResource, Benchmark {
+    lateinit var name: String
+    lateinit var resourceTypes: List<TypeName>
+    lateinit var loadTypes: List<TypeName>
+    lateinit var kafkaConfig: KafkaConfig
+    lateinit var infrastructure: Resources
+    lateinit var sut: Resources
+    lateinit var loadGenerator: Resources
+    var namespace = System.getenv("NAMESPACE") ?: DEFAULT_NAMESPACE
+
+    @Transient
+    private val client: NamespacedKubernetesClient = DefaultKubernetesClient().inNamespace(namespace)
+
+    /**
+     * Loads [KubernetesResource]s.
+     * It first loads them via the [YamlParserFromFile] to check for their concrete type and afterwards initializes them using
+     * the [K8sResourceLoader].
+     */
+    fun loadKubernetesResources(resourceSet: List<ResourceSets>): Collection<Pair<String, KubernetesResource>> {
+        return resourceSet.flatMap { it.loadResourceSet(this.client) }
+    }
+
+    override fun setupInfrastructure() {
+        val kubernetesManager = K8sManager(this.client)
+        loadKubernetesResources(this.infrastructure.resources)
+            .map{it.second}
+            .forEach { kubernetesManager.deploy(it) }
+    }
+
+    override fun teardownInfrastructure() {
+        val kubernetesManager = K8sManager(this.client)
+        loadKubernetesResources(this.infrastructure.resources)
+            .map{it.second}
+            .forEach { kubernetesManager.remove(it) }
+    }
+
+    /**
+     * Builds a deployment.
+     * First loads all required resources and then patches them to the concrete load and resources for the experiment.
+     * Afterwards, it patches additional (cluster-dependent) configurations into the resources.
+     * @param load concrete load that will be benchmarked in this experiment.
+     * @param res concrete resource that will be scaled for this experiment.
+     * @param configurationOverrides additional configuration overrides that are applied to the resources.
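+     * @param loadGenerationDelay seconds to wait between the start of the SUT and the load generator.
+     * @param afterTeardownDelay seconds to wait after the teardown of the deployment.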
+     * @return a [BenchmarkDeployment]
+     */
+    override fun buildDeployment(
+        load: LoadDimension,
+        res: Resource,
+        configurationOverrides: List<ConfigurationOverride?>,
+        loadGenerationDelay: Long,
+        afterTeardownDelay: Long
+    ): BenchmarkDeployment {
+        logger.info { "Using $namespace as namespace." }
+
+        val appResources = loadKubernetesResources(this.sut.resources)
+        val loadGenResources = loadKubernetesResources(this.loadGenerator.resources)
+
+        val patcherFactory = PatcherFactory()
+
+        // Patch the load dimension into the resources
+        load.getType().forEach { patcherDefinition ->
+            patcherFactory.createPatcher(patcherDefinition, loadGenResources).patch(load.get().toString())
+        }
+        res.getType().forEach { patcherDefinition ->
+            patcherFactory.createPatcher(patcherDefinition, appResources).patch(res.get().toString())
+        }
+
+        // Patch the given overrides
+        configurationOverrides.forEach { override ->
+            override?.let {
+                patcherFactory.createPatcher(it.patcher, appResources + loadGenResources).patch(override.value)
+            }
+        }
+        return KubernetesBenchmarkDeployment(
+            appResources = appResources.map { it.second },
+            loadGenResources = loadGenResources.map { it.second },
+            loadGenerationDelay = loadGenerationDelay,
+            afterTeardownDelay = afterTeardownDelay,
+            kafkaConfig = hashMapOf("bootstrap.servers" to kafkaConfig.bootstrapServer),
+            topics = kafkaConfig.topics,
+            client = this.client
+        )
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/benchmark/KubernetesBenchmarkDeployment.kt b/theodolite/src/main/kotlin/theodolite/benchmark/KubernetesBenchmarkDeployment.kt
new file mode 100644
index 0000000000000000000000000000000000000000..423ac92c654ff55057796d9642c2cb408bc62fe5
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/benchmark/KubernetesBenchmarkDeployment.kt
@@ -0,0 +1,74 @@
+package theodolite.benchmark
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.quarkus.runtime.annotations.RegisterForReflection
+import mu.KotlinLogging
+import org.apache.kafka.clients.admin.NewTopic
+import theodolite.k8s.K8sManager
+import theodolite.k8s.ResourceByLabelHandler
+import theodolite.k8s.TopicManager
+import theodolite.util.KafkaConfig
+import java.time.Duration
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * Organizes the deployment of benchmarks in Kubernetes.
+ *
+ * @param appResources list of [KubernetesResource]s that are deployed for the system under test.
+ * @param loadGenResources list of [KubernetesResource]s that are deployed for the load generator.
+ * @param loadGenerationDelay seconds to wait between the deployment of the SUT and the load generator.
+ * @param afterTeardownDelay seconds to wait after the teardown.
+ * @param kafkaConfig for the organization of Kafka topics.
+ * @param topics list of topics that are created or deleted.
+ * @param client the Kubernetes client to operate with.
+ */
+@RegisterForReflection
+class KubernetesBenchmarkDeployment(
+    val appResources: List<KubernetesResource>,
+    val loadGenResources: List<KubernetesResource>,
+    private val loadGenerationDelay: Long,
+    private val afterTeardownDelay: Long,
+    private val kafkaConfig: HashMap<String, Any>,
+    private val topics: List<KafkaConfig.TopicWrapper>,
+    private val client: NamespacedKubernetesClient
+) : BenchmarkDeployment {
+    private val kafkaController = TopicManager(this.kafkaConfig)
+    private val kubernetesManager = K8sManager(client)
+    private val LAG_EXPORTER_POD_LABEL_NAME = "app.kubernetes.io/name"
+    private val LAG_EXPORTER_POD_LABEL_VALUE = "kafka-lag-exporter"
+
+    /**
+     * Sets up a [KubernetesBenchmark] using the [TopicManager] and the [K8sManager]:
+     *  - Create the needed topics.
+     *  - Deploy the needed resources.
+     */
+    override fun setup() {
+        val kafkaTopics = this.topics.filter { !it.removeOnly }
+            .map { NewTopic(it.name, it.numPartitions, it.replicationFactor) }
+        kafkaController.createTopics(kafkaTopics)
+        appResources.forEach { kubernetesManager.deploy(it) }
+        logger.info { "Wait ${this.loadGenerationDelay} seconds before starting the load generator." }
+        Thread.sleep(Duration.ofSeconds(this.loadGenerationDelay).toMillis())
+        loadGenResources.forEach { kubernetesManager.deploy(it) }
+    }
+
+    /**
+     * Tears down a [KubernetesBenchmark]:
+     *  - Reset the Kafka Lag Exporter.
+     *  - Remove the used topics.
+     *  - Remove the [KubernetesResource]s.
+     */
+    override fun teardown() {
+        loadGenResources.forEach { kubernetesManager.remove(it) }
+        appResources.forEach { kubernetesManager.remove(it) }
+        kafkaController.removeTopics(this.topics.map { topic -> topic.name })
+        ResourceByLabelHandler(client).removePods(
+            labelName = LAG_EXPORTER_POD_LABEL_NAME,
+            labelValue = LAG_EXPORTER_POD_LABEL_VALUE
+        )
+        logger.info { "Teardown complete. Wait $afterTeardownDelay ms to let everything come down." }
+        Thread.sleep(Duration.ofSeconds(afterTeardownDelay).toMillis())
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/benchmark/ResourceSet.kt b/theodolite/src/main/kotlin/theodolite/benchmark/ResourceSet.kt
new file mode 100644
index 0000000000000000000000000000000000000000..19fc85845ae99c7a5e4f7369db4b6cd383c3131b
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/benchmark/ResourceSet.kt
@@ -0,0 +1,17 @@
+package theodolite.benchmark
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.quarkus.runtime.annotations.RegisterForReflection
+
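+/**
+ * A set of Kubernetes resources that can be loaded from some source,
+ * e.g., from a ConfigMap ([ConfigMapResourceSet]) or from the file system ([FileSystemResourceSet]).
+ */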
+@RegisterForReflection
+@JsonDeserialize
+interface ResourceSet: KubernetesResource {
+
+    fun getResourceSet(client: NamespacedKubernetesClient): Collection<Pair<String, KubernetesResource>>
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/benchmark/ResourceSets.kt b/theodolite/src/main/kotlin/theodolite/benchmark/ResourceSets.kt
new file mode 100644
index 0000000000000000000000000000000000000000..a4fe443e7f304c411792ee06c32592ba3c9e692a
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/benchmark/ResourceSets.kt
@@ -0,0 +1,36 @@
+package theodolite.benchmark
+
+import com.fasterxml.jackson.annotation.JsonInclude
+import com.fasterxml.jackson.annotation.JsonProperty
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.quarkus.runtime.annotations.RegisterForReflection
+import mu.KotlinLogging
+import theodolite.util.DeploymentFailedException
+
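+/**
+ * Wraps exactly one concrete [ResourceSet]: either a [ConfigMapResourceSet]
+ * (via the `configMap` field) or a [FileSystemResourceSet] (via the `fileSystem` field).
+ */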
+@JsonDeserialize
+@RegisterForReflection
+class ResourceSets: KubernetesResource {
+    @JsonProperty("configMap")
+    @JsonInclude(JsonInclude.Include.NON_NULL)
+    var configMap: ConfigMapResourceSet? = null
+
+    @JsonProperty("fileSystem")
+    @JsonInclude(JsonInclude.Include.NON_NULL)
+    var fileSystem: FileSystemResourceSet? = null
+
+    fun loadResourceSet(client: NamespacedKubernetesClient): Collection<Pair<String, KubernetesResource>> {
+        return if (configMap != null) {
+            configMap?.getResourceSet(client = client)!!
+        } else if (fileSystem != null) {
+            fileSystem?.getResourceSet(client = client)!!
+        } else {
+            throw DeploymentFailedException("Could not load resource set: neither 'configMap' nor 'fileSystem' is set.")
+        }
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/benchmark/Resources.kt b/theodolite/src/main/kotlin/theodolite/benchmark/Resources.kt
new file mode 100644
index 0000000000000000000000000000000000000000..0187735b8fd273419874942cb7ed68797732c84c
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/benchmark/Resources.kt
@@ -0,0 +1,13 @@
+package theodolite.benchmark
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import com.fasterxml.jackson.databind.annotation.JsonSerialize
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+@JsonDeserialize
+@RegisterForReflection
+class Resources {
+
+    lateinit var resources: List<ResourceSets>
+
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/evaluation/AnalysisExecutor.kt b/theodolite/src/main/kotlin/theodolite/evaluation/AnalysisExecutor.kt
new file mode 100644
index 0000000000000000000000000000000000000000..281c68e318784ee8206473cd014f814b3f5152a9
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/evaluation/AnalysisExecutor.kt
@@ -0,0 +1,93 @@
+package theodolite.evaluation
+
+import mu.KotlinLogging
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.util.EvaluationFailedException
+import theodolite.util.IOHandler
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import java.text.Normalizer
+import java.time.Duration
+import java.time.Instant
+import java.util.*
+import java.util.regex.Pattern
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * Contains the analysis. Fetches a metric from Prometheus, documents it, and evaluates it.
+ * @param slo Slo that is used for the analysis.
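+ * @param executionId ID of the execution, used to name the result files.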
+ */
+class AnalysisExecutor(
+    private val slo: BenchmarkExecution.Slo,
+    private val executionId: Int
+) {
+
+    private val fetcher = MetricFetcher(
+        prometheusURL = slo.prometheusUrl,
+        offset = Duration.ofHours(slo.offset.toLong())
+    )
+
+    /**
+     *  Analyzes an experiment via Prometheus data.
+     *  First fetches the data from Prometheus, then documents it, and afterwards evaluates it via the [slo].
+     *  @param load of the experiment.
+     *  @param res of the experiment.
+     *  @param executionIntervals list of start and end points of experiments
+     *  @return true if the experiment succeeded.
+     */
+    fun analyze(load: LoadDimension, res: Resource, executionIntervals: List<Pair<Instant, Instant>>): Boolean {
+        var result: Boolean
+        var repetitionCounter = 1
+
+        try {
+            val ioHandler = IOHandler()
+            val resultsFolder: String = ioHandler.getResultFolderURL()
+            val fileURL = "${resultsFolder}exp${executionId}_${load.get()}_${res.get()}_${slo.sloType.toSlug()}"
+
+            val prometheusData = executionIntervals
+                .map { interval ->
+                    fetcher.fetchMetric(
+                        start = interval.first,
+                        end = interval.second,
+                        query = SloConfigHandler.getQueryString(sloType = slo.sloType)
+                    )
+                }
+
+            prometheusData.forEach { data ->
+                ioHandler.writeToCSVFile(
+                    fileURL = "${fileURL}_${repetitionCounter++}",
+                    data = data.getResultAsList(),
+                    columns = listOf("labels", "timestamp", "value")
+                )
+            }
+
+            val sloChecker = SloCheckerFactory().create(
+                sloType = slo.sloType,
+                properties = slo.properties,
+                load = load
+            )
+
+            result = sloChecker.evaluate(prometheusData)
+
+        } catch (e: Exception) {
+            throw EvaluationFailedException("Evaluation failed for resource '${res.get()}' and load '${load.get()}'", e)
+        }
+        return result
+    }
+
+    private val NONLATIN: Pattern = Pattern.compile("[^\\w-]")
+    private val WHITESPACE: Pattern = Pattern.compile("[\\s]")
+
+    private fun String.toSlug(): String {
+        val noWhitespace: String = WHITESPACE.matcher(this).replaceAll("-")
+        val normalized: String = Normalizer.normalize(noWhitespace, Normalizer.Form.NFD)
+        val slug: String = NONLATIN.matcher(normalized).replaceAll("")
+        return slug.toLowerCase(Locale.ENGLISH)
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/evaluation/ExternalSloChecker.kt b/theodolite/src/main/kotlin/theodolite/evaluation/ExternalSloChecker.kt
new file mode 100644
index 0000000000000000000000000000000000000000..d646286b70bc5880df1f603afdc2bda22bcc3259
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/evaluation/ExternalSloChecker.kt
@@ -0,0 +1,57 @@
+package theodolite.evaluation
+
+import khttp.post
+import mu.KotlinLogging
+import theodolite.util.PrometheusResponse
+import java.net.ConnectException
+
+/**
+ * [SloChecker] that uses an external source for the concrete evaluation.
+ * @param externalSlopeURL The URL under which the external evaluation service can be reached.
+ * @param threshold threshold that must not be exceeded for the evaluation to return true.
+ * @param warmup time at the beginning of an experiment that is not taken into consideration for the evaluation.
+ */
+class ExternalSloChecker(
+    private val externalSlopeURL: String,
+    private val threshold: Int,
+    private val warmup: Int
+) : SloChecker {
+
+    private val RETRIES = 2
+    private val TIMEOUT = 60.0
+
+    private val logger = KotlinLogging.logger {}
+
+    /**
+     * Evaluates an experiment using an external service.
+     * Will try to reach the external service until success or [RETRIES] times.
+     * Each request will timeout after [TIMEOUT].
+     *
+     * @param fetchedData the fetched data that should be evaluated.
+     * @return true if the experiment was successful, i.e., the threshold was not exceeded.
+     * @throws ConnectException if the external service could not be reached.
+     */
+    override fun evaluate(fetchedData: List<PrometheusResponse>): Boolean {
+        var counter = 0
+        val data = SloJson.Builder()
+            .results(fetchedData.map { it.data?.result })
+            .addMetadata("threshold", threshold)
+            .addMetadata("warmup", warmup)
+            .build()
+            .toJson()
+
+        while (counter < RETRIES) {
+            val result = post(externalSlopeURL, data = data, timeout = TIMEOUT)
+            if (result.statusCode != 200) {
+                counter++
+                logger.error { "Could not reach external SLO checker (status code ${result.statusCode})." }
+            } else {
+                val booleanResult = result.text.toBoolean()
+                logger.info { "SLO checker result is: $booleanResult" }
+                return booleanResult
+            }
+        }
+
+        throw ConnectException("Could not reach external SLO checker")
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/evaluation/MetricFetcher.kt b/theodolite/src/main/kotlin/theodolite/evaluation/MetricFetcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..e54d79fe0f95b9f6079bd4295a74e81250b73a90
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/evaluation/MetricFetcher.kt
@@ -0,0 +1,75 @@
+package theodolite.evaluation
+
+import com.google.gson.Gson
+import khttp.get
+import khttp.responses.Response
+import mu.KotlinLogging
+import theodolite.util.PrometheusResponse
+import java.net.ConnectException
+import java.time.Duration
+import java.time.Instant
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * Used to fetch metrics from Prometheus.
+ * @param prometheusURL URL to the Prometheus server.
+ * @param offset Duration by which the start and end points of the queries
+ * are shifted (e.g., to compensate for different timezones).
+ */
+class MetricFetcher(private val prometheusURL: String, private val offset: Duration) {
+    private val RETRIES = 2
+    private val TIMEOUT = 60.0
+
+    /**
+     * Tries to fetch a metric by a query to a Prometheus server.
+     * Retries to fetch the metric [RETRIES] times.
+     * Connects to the server via [prometheusURL].
+     *
+     * @param start start point of the query.
+     * @param end end point of the query.
+     * @param query query for the Prometheus server.
+     * @throws ConnectException if the Prometheus server timed out or was not reached.
+     */
+    fun fetchMetric(start: Instant, end: Instant, query: String): PrometheusResponse {
+
+        val offsetStart = start.minus(offset)
+        val offsetEnd = end.minus(offset)
+
+        var counter = 0
+        val parameter = mapOf(
+            "query" to query,
+            "start" to offsetStart.toString(),
+            "end" to offsetEnd.toString(),
+            "step" to "5s"
+        )
+
+        while (counter < RETRIES) {
+            val response = get("$prometheusURL/api/v1/query_range", params = parameter, timeout = TIMEOUT)
+            if (response.statusCode != 200) {
+                val message = response.jsonObject.toString()
+                logger.warn { "Could not connect to Prometheus: $message. Retrying now." }
+                counter++
+            } else {
+                val values = parseValues(response)
+                if (values.data?.result.isNullOrEmpty()) {
+                    throw NoSuchFieldException("Empty query result: $values between $start and $end for query $query.")
+                }
+                return values
+            }
+        }
+        throw ConnectException("No answer from Prometheus received.")
+    }
+
+    /**
+     * Deserializes a response from Prometheus.
+     * @param values Response from Prometheus.
+     * @return a [PrometheusResponse]
+     */
+    private fun parseValues(values: Response): PrometheusResponse {
+        return Gson().fromJson<PrometheusResponse>(
+            values.jsonObject.toString(),
+            PrometheusResponse::class.java
+        )
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/evaluation/SloChecker.kt b/theodolite/src/main/kotlin/theodolite/evaluation/SloChecker.kt
new file mode 100644
index 0000000000000000000000000000000000000000..af70fa5dca3f0556d38791ed96c2af30b9a44a68
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/evaluation/SloChecker.kt
@@ -0,0 +1,16 @@
+package theodolite.evaluation
+
+import theodolite.util.PrometheusResponse
+
+/**
+ * A SloChecker can be used to evaluate data from Prometheus.
+ */
+interface SloChecker {
+    /**
+     * Evaluates [fetchedData] and returns if the experiments were successful.
+     *
+     * @param fetchedData from Prometheus that will be evaluated.
+     * @return true if experiments were successful. Otherwise false.
+     */
+    fun evaluate(fetchedData: List<PrometheusResponse>): Boolean
+}
diff --git a/theodolite/src/main/kotlin/theodolite/evaluation/SloCheckerFactory.kt b/theodolite/src/main/kotlin/theodolite/evaluation/SloCheckerFactory.kt
new file mode 100644
index 0000000000000000000000000000000000000000..64f9110cd931feef41dc65f88d6623e82f4e03a2
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/evaluation/SloCheckerFactory.kt
@@ -0,0 +1,74 @@
+package theodolite.evaluation
+
+import theodolite.util.LoadDimension
+
+/**
+ * Factory used to create different [SloChecker]s.
+ * Supports: `lag trend`, `lag trend ratio`, `dropped records`, and `dropped records ratio`.
+ */
+class SloCheckerFactory {
+
+    /**
+     * Creates different [SloChecker]s.
+     *
+     * Supports: `lag trend`, `lag trend ratio`, `dropped records`, and `dropped records ratio` as arguments for `sloType`.
+     *
+     * ### `lag trend` and `dropped records`
+     * Creates an [ExternalSloChecker] with defined parameters.
+     *
+     * The properties map needs the following fields:
+     * - `externalSloUrl`: URL to the concrete SLO checker service.
+     * - `threshold`: fixed value used for the slope.
+     * - `warmup`: time from the beginning to skip in the analysis.
+     *
+     *
+     * ### `lag trend ratio` and `dropped records ratio`
+     * Creates an [ExternalSloChecker] with defined parameters.
+     * The required threshold is computed using a ratio and the load of the experiment.
+     *
+     * The properties map needs the following fields:
+     * - `externalSloUrl`: URL to the concrete SLO checker service.
+     * - `ratio`: of the executed load that is accepted for the slope.
+     * - `warmup`: time from the beginning to skip in the analysis.
+     *
+     * @param sloType Type of the [SloChecker].
+     * @param properties map of properties to use for the SLO checker creation.
+     * @param load that is executed in the experiment.
+     *
+     * @return A [SloChecker]
+     * @throws IllegalArgumentException If [sloType] not supported.
+     */
+    fun create(
+        sloType: String,
+        properties: MutableMap<String, String>,
+        load: LoadDimension
+    ): SloChecker {
+        return when (sloType.toLowerCase()) {
+            SloTypes.LAG_TREND.value, SloTypes.DROPPED_RECORDS.value -> ExternalSloChecker(
+                externalSlopeURL = properties["externalSloUrl"]
+                    ?: throw IllegalArgumentException("externalSloUrl expected"),
+                threshold = properties["threshold"]?.toInt() ?: throw IllegalArgumentException("threshold expected"),
+                warmup = properties["warmup"]?.toInt() ?: throw IllegalArgumentException("warmup expected")
+            )
+
+            SloTypes.LAG_TREND_RATIO.value, SloTypes.DROPPED_RECORDS_RATIO.value -> {
+                val thresholdRatio =
+                    properties["ratio"]?.toDouble()
+                        ?: throw IllegalArgumentException("ratio for threshold expected")
+                if (thresholdRatio < 0.0) {
+                    throw IllegalArgumentException("Threshold ratio needs to be an Double greater or equal 0.0")
+                }
+                // cast to Int, as exact rounding is not necessary here
+                val threshold = (load.get() * thresholdRatio).toInt()
+
+                ExternalSloChecker(
+                    externalSlopeURL = properties["externalSloUrl"]
+                        ?: throw IllegalArgumentException("externalSloUrl expected"),
+                    threshold = threshold,
+                    warmup = properties["warmup"]?.toInt() ?: throw IllegalArgumentException("warmup expected")
+                )
+            }
+            else -> throw IllegalArgumentException("Slotype $sloType not found.")
+        }
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/evaluation/SloConfigHandler.kt b/theodolite/src/main/kotlin/theodolite/evaluation/SloConfigHandler.kt
new file mode 100644
index 0000000000000000000000000000000000000000..93929218c822030ff065dafb19cce1fbaa69a179
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/evaluation/SloConfigHandler.kt
@@ -0,0 +1,20 @@
+package theodolite.evaluation
+
+import theodolite.util.InvalidPatcherConfigurationException
+import javax.enterprise.context.ApplicationScoped
+
+private const val CONSUMER_LAG_QUERY = "sum by(group)(kafka_consumergroup_group_lag >= 0)"
+private const val DROPPED_RECORDS_QUERY = "sum by(job) (kafka_streams_stream_task_metrics_dropped_records_total>=0)"
+
+@ApplicationScoped
+class SloConfigHandler {
+    companion object {
+        fun getQueryString(sloType: String): String {
+            return when (sloType.toLowerCase()) {
+                SloTypes.LAG_TREND.value, SloTypes.LAG_TREND_RATIO.value -> CONSUMER_LAG_QUERY
+                SloTypes.DROPPED_RECORDS.value, SloTypes.DROPPED_RECORDS_RATIO.value -> DROPPED_RECORDS_QUERY
+                else -> throw InvalidPatcherConfigurationException("Could not find Prometheus query string for SLO type $sloType")
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/evaluation/SloJson.kt b/theodolite/src/main/kotlin/theodolite/evaluation/SloJson.kt
new file mode 100644
index 0000000000000000000000000000000000000000..fc9fe17b255dbb5ae68881538d8d2a50a191edb1
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/evaluation/SloJson.kt
@@ -0,0 +1,63 @@
+package theodolite.evaluation
+
+import com.google.gson.Gson
+import theodolite.util.PromResult
+
+class SloJson private constructor(
+    val results: List<List<PromResult>?>? = null,
+    var metadata: MutableMap<String, Any>? = null
+) {
+
+    data class Builder(
+        var results: List<List<PromResult>?>? = null,
+        var metadata: MutableMap<String, Any>? = null
+    ) {
+
+        /**
+         *  Set the results
+         *
+         * @param results list of prometheus results
+         */
+        fun results(results: List<List<PromResult>?>) = apply { this.results = results }
+
+        /**
+         * Add metadata as key value pairs
+         *
+         * @param key key of the metadata to be added
+         * @param value value of the metadata to be added
+         */
+        fun addMetadata(key: String, value: String) = apply {
+            if (this.metadata.isNullOrEmpty()) {
+                this.metadata = mutableMapOf(key to value)
+            } else {
+                this.metadata!![key] = value
+            }
+        }
+
+        /**
+         * Add metadata as key value pairs
+         *
+         * @param key key of the metadata to be added
+         * @param value value of the metadata to be added
+         */
+        fun addMetadata(key: String, value: Int) = apply {
+            if (this.metadata.isNullOrEmpty()) {
+                this.metadata = mutableMapOf(key to value)
+            } else {
+                this.metadata!![key] = value
+            }
+        }
+
+        fun build() = SloJson(
+            results = results,
+            metadata = metadata
+        )
+    }
+
+    fun toJson(): String {
+        return Gson().toJson(mapOf(
+            "results" to this.results,
+            "metadata" to this.metadata
+        ))
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/evaluation/SloTypes.kt b/theodolite/src/main/kotlin/theodolite/evaluation/SloTypes.kt
new file mode 100644
index 0000000000000000000000000000000000000000..ac9de35861b0bd9c012bfb0b8cfcb2e1aa5aed68
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/evaluation/SloTypes.kt
@@ -0,0 +1,8 @@
+package theodolite.evaluation
+
+enum class SloTypes(val value: String) {
+    LAG_TREND("lag trend"),
+    LAG_TREND_RATIO("lag trend ratio"),
+    DROPPED_RECORDS("dropped records"),
+    DROPPED_RECORDS_RATIO("dropped records ratio")
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/execution/BenchmarkExecutor.kt b/theodolite/src/main/kotlin/theodolite/execution/BenchmarkExecutor.kt
new file mode 100644
index 0000000000000000000000000000000000000000..3238f447be06ce6486bb7f6ca1758700f36ba558
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/BenchmarkExecutor.kt
@@ -0,0 +1,70 @@
+package theodolite.execution
+
+import mu.KotlinLogging
+import theodolite.benchmark.Benchmark
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.util.ConfigurationOverride
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import theodolite.util.Results
+import java.time.Duration
+import java.util.concurrent.atomic.AtomicBoolean
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * The benchmark executor runs a single experiment.
+ *
+ * @property benchmark the benchmark to run experiments for.
+ * @property results the object that collects the results of all experiments.
+ * @property executionDuration the duration of a single experiment.
+ * @property repetitions the number of repetitions of a single experiment.
+ */
+abstract class BenchmarkExecutor(
+    val benchmark: Benchmark,
+    val results: Results,
+    val executionDuration: Duration,
+    val configurationOverrides: List<ConfigurationOverride?>,
+    val slos: List<BenchmarkExecution.Slo>,
+    val repetitions: Int,
+    val executionId: Int,
+    val loadGenerationDelay: Long,
+    val afterTeardownDelay: Long,
+    val executionName: String
+) {
+
+    var run: AtomicBoolean = AtomicBoolean(true)
+
+    /**
+     * Run an experiment for the given parametrization, evaluate the
+     * experiment, and save the result.
+     *
+     * @param load load to be tested.
+     * @param res resources to be tested.
+     * @return true if the number of resources is suitable for the
+     *     given load; false otherwise.
+     */
+    abstract fun runExperiment(load: LoadDimension, res: Resource): Boolean
+
+    /**
+     * Wait while the benchmark is running and
+     * log the elapsed time every minute.
+     */
+    fun waitAndLog() {
+        logger.info { "Execution of a new experiment started." }
+
+        var secondsRunning = 0L
+
+        while (run.get() && secondsRunning < executionDuration.toSeconds()) {
+            secondsRunning++
+            Thread.sleep(Duration.ofSeconds(1).toMillis())
+
+            if ((secondsRunning % 60) == 0L) {
+                logger.info { "Executed: ${secondsRunning / 60} minutes." }
+            }
+        }
+
+        logger.debug { "Executor shutdown gracefully." }
+
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/execution/BenchmarkExecutorImpl.kt b/theodolite/src/main/kotlin/theodolite/execution/BenchmarkExecutorImpl.kt
new file mode 100644
index 0000000000000000000000000000000000000000..2e938be3a6e503a5e7e3f94c18a9454e173db5b0
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/BenchmarkExecutorImpl.kt
@@ -0,0 +1,133 @@
+package theodolite.execution
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+import mu.KotlinLogging
+import theodolite.benchmark.Benchmark
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.evaluation.AnalysisExecutor
+import theodolite.execution.operator.EventCreator
+import theodolite.util.*
+import java.time.Duration
+import java.time.Instant
+
+private val logger = KotlinLogging.logger {}
+
+@RegisterForReflection
+class BenchmarkExecutorImpl(
+    benchmark: Benchmark,
+    results: Results,
+    executionDuration: Duration,
+    configurationOverrides: List<ConfigurationOverride?>,
+    slos: List<BenchmarkExecution.Slo>,
+    repetitions: Int,
+    executionId: Int,
+    loadGenerationDelay: Long,
+    afterTeardownDelay: Long,
+    executionName: String
+) : BenchmarkExecutor(
+    benchmark,
+    results,
+    executionDuration,
+    configurationOverrides,
+    slos,
+    repetitions,
+    executionId,
+    loadGenerationDelay,
+    afterTeardownDelay,
+    executionName
+) {
+    private val eventCreator = EventCreator()
+    private val mode = Configuration.EXECUTION_MODE
+
+    override fun runExperiment(load: LoadDimension, res: Resource): Boolean {
+        var result = false
+        val executionIntervals: MutableList<Pair<Instant, Instant>> = ArrayList()
+
+        for (i in 1.rangeTo(repetitions)) {
+            if (this.run.get()) {
+                logger.info { "Run repetition $i/$repetitions" }
+                executionIntervals.add(runSingleExperiment(load, res))
+            } else {
+                break
+            }
+        }
+
+        // Analyze the experiment only if `run` is still true;
+        // otherwise the experiment was canceled by the user and
+        // an ExecutionFailedException is thrown below.
+        if (this.run.get()) {
+            val experimentResults = slos.map {
+                AnalysisExecutor(slo = it, executionId = executionId)
+                    .analyze(
+                        load = load,
+                        res = res,
+                        executionIntervals = executionIntervals
+                    )
+            }
+
+            result = (false !in experimentResults)
+            this.results.setResult(Pair(load, res), result)
+        }
+
+        if (!this.run.get()) {
+            throw ExecutionFailedException("The execution was interrupted")
+        }
+
+        return result
+    }
+
+    private fun runSingleExperiment(load: LoadDimension, res: Resource): Pair<Instant, Instant> {
+        val benchmarkDeployment = benchmark.buildDeployment(
+            load,
+            res,
+            this.configurationOverrides,
+            this.loadGenerationDelay,
+            this.afterTeardownDelay
+        )
+        val from = Instant.now()
+
+        try {
+            benchmarkDeployment.setup()
+            this.waitAndLog()
+            if (mode == ExecutionModes.OPERATOR.value) {
+                eventCreator.createEvent(
+                    executionName = executionName,
+                    type = "NORMAL",
+                    reason = "Start experiment",
+                    message = "load: ${load.get()}, resources: ${res.get()}")
+            }
+        } catch (e: Exception) {
+            this.run.set(false)
+
+            if (mode == ExecutionModes.OPERATOR.value) {
+                eventCreator.createEvent(
+                    executionName = executionName,
+                    type = "WARNING",
+                    reason = "Start experiment failed",
+                    message = "load: ${load.get()}, resources: ${res.get()}")
+            }
+            throw ExecutionFailedException("Error during setup the experiment", e)
+        }
+        val to = Instant.now()
+        try {
+            benchmarkDeployment.teardown()
+            if (mode == ExecutionModes.OPERATOR.value) {
+                eventCreator.createEvent(
+                    executionName = executionName,
+                    type = "NORMAL",
+                    reason = "Stop experiment",
+                    message = "Teardown complete")
+            }
+        } catch (e: Exception) {
+            if (mode == ExecutionModes.OPERATOR.value) {
+                eventCreator.createEvent(
+                    executionName = executionName,
+                    type = "WARNING",
+                    reason = "Stop experiment failed",
+                    message = "Teardown failed: ${e.message}")
+            }
+            throw ExecutionFailedException("Error during teardown the experiment", e)
+        }
+        return Pair(from, to)
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/execution/ExecutionModes.kt b/theodolite/src/main/kotlin/theodolite/execution/ExecutionModes.kt
new file mode 100644
index 0000000000000000000000000000000000000000..bf947be01b534fd000d3967f0b72ef25978d4110
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/ExecutionModes.kt
@@ -0,0 +1,7 @@
+package theodolite.execution
+
+enum class ExecutionModes(val value: String) {
+    OPERATOR("operator"),
+    YAML_EXECUTOR("yaml-executor"),
+    STANDALONE("standalone")
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/execution/Main.kt b/theodolite/src/main/kotlin/theodolite/execution/Main.kt
new file mode 100644
index 0000000000000000000000000000000000000000..11f696ddd739e987e92ecec724390948714d898b
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/Main.kt
@@ -0,0 +1,29 @@
+package theodolite.execution
+
+import io.quarkus.runtime.annotations.QuarkusMain
+import mu.KotlinLogging
+import theodolite.execution.operator.TheodoliteOperator
+import theodolite.util.Configuration
+import kotlin.system.exitProcess
+
+private val logger = KotlinLogging.logger {}
+
+@QuarkusMain
+object Main {
+
+    @JvmStatic
+    fun main(args: Array<String>) {
+
+        val mode = Configuration.EXECUTION_MODE
+        logger.info { "Start Theodolite with mode $mode" }
+
+        when (mode.toLowerCase()) {
+            ExecutionModes.STANDALONE.value, ExecutionModes.YAML_EXECUTOR.value -> TheodoliteStandalone().start()  // TODO remove standalone (#209)
+            ExecutionModes.OPERATOR.value -> TheodoliteOperator().start()
+            else -> {
+                logger.error { "MODE $mode not found" }
+                exitProcess(1)
+            }
+        }
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/execution/Shutdown.kt b/theodolite/src/main/kotlin/theodolite/execution/Shutdown.kt
new file mode 100644
index 0000000000000000000000000000000000000000..6dedc94af864269d7d15929c69ec54aa384fc8e3
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/Shutdown.kt
@@ -0,0 +1,48 @@
+package theodolite.execution
+
+import mu.KotlinLogging
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.benchmark.KubernetesBenchmark
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * This shutdown hook can be used to delete all Kubernetes resources which are related to the given execution and benchmark.
+ *
+ * @property benchmarkExecution the execution whose resources should be removed.
+ * @property benchmark the benchmark whose resources should be removed.
+ */
+class Shutdown(private val benchmarkExecution: BenchmarkExecution, private val benchmark: KubernetesBenchmark) :
+    Thread() {
+
+    /**
+     * Deletes all Kubernetes resources which are
+     * related to the execution and the benchmark.
+     */
+    override fun run() {
+        // Build Configuration to teardown
+        try {
+            logger.info { "Received shutdown signal -> Shutting down" }
+            val deployment =
+                benchmark.buildDeployment(
+                    load = LoadDimension(0, emptyList()),
+                    res = Resource(0, emptyList()),
+                    configurationOverrides = benchmarkExecution.configOverrides,
+                    loadGenerationDelay = 0L,
+                    afterTeardownDelay = 5L
+                )
+            deployment.teardown()
+            logger.info {
+                "Finished teardown of all benchmark resources."
+            }
+        } catch (e: Exception) {
+            logger.warn {
+                "Could not delete all specified resources from Kubernetes. " +
+                        "This could be the case, if not all resources are deployed and running."
+            }
+
+        }
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/execution/TheodoliteExecutor.kt b/theodolite/src/main/kotlin/theodolite/execution/TheodoliteExecutor.kt
new file mode 100644
index 0000000000000000000000000000000000000000..315d1cf1afe7fd2ffbfc1c437d725d4dff29f637
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/TheodoliteExecutor.kt
@@ -0,0 +1,154 @@
+package theodolite.execution
+
+import mu.KotlinLogging
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.benchmark.KubernetesBenchmark
+import theodolite.patcher.PatcherDefinitionFactory
+import theodolite.strategies.StrategyFactory
+import theodolite.strategies.searchstrategy.CompositeStrategy
+import theodolite.util.*
+import java.io.File
+import java.time.Duration
+
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * The Theodolite executor runs all the experiments defined with the given execution and benchmark configuration.
+ *
+ * @property config Configuration of an execution
+ * @property kubernetesBenchmark Configuration of a benchmark
+ * @constructor Create empty Theodolite executor
+ */
+class TheodoliteExecutor(
+    private val config: BenchmarkExecution,
+    private val kubernetesBenchmark: KubernetesBenchmark
+) {
+    /**
+     * An executor object, configured with the specified benchmark, evaluation method, experiment duration
+     * and overrides which are given in the execution.
+     */
+    lateinit var executor: BenchmarkExecutor
+
+    /**
+     * Creates all required components to start Theodolite.
+     *
+     * @return a [Config], that contains a list of [LoadDimension]s,
+     *          a list of [Resource]s, and the [CompositeStrategy].
+     * The [CompositeStrategy] is configured and able to find the minimum required resource for the given load.
+     */
+    private fun buildConfig(): Config {
+        val results = Results()
+        val strategyFactory = StrategyFactory()
+
+        val executionDuration = Duration.ofSeconds(config.execution.duration)
+
+        val resourcePatcherDefinition =
+            PatcherDefinitionFactory().createPatcherDefinition(
+                config.resources.resourceType,
+                this.kubernetesBenchmark.resourceTypes
+            )
+
+        val loadDimensionPatcherDefinition =
+            PatcherDefinitionFactory().createPatcherDefinition(
+                config.load.loadType,
+                this.kubernetesBenchmark.loadTypes
+            )
+
+        executor =
+            BenchmarkExecutorImpl(
+                benchmark = kubernetesBenchmark,
+                results = results,
+                executionDuration = executionDuration,
+                configurationOverrides = config.configOverrides,
+                slos = config.slos,
+                repetitions = config.execution.repetitions,
+                executionId = config.executionId,
+                loadGenerationDelay = config.execution.loadGenerationDelay,
+                afterTeardownDelay = config.execution.afterTeardownDelay,
+                executionName = config.name
+            )
+
+        if (config.load.loadValues != config.load.loadValues.sorted()) {
+            config.load.loadValues = config.load.loadValues.sorted()
+            logger.info {
+                "Load values are not sorted correctly, Theodolite sorts them in ascending order." +
+                        "New order is: ${config.load.loadValues}"
+            }
+        }
+
+        if (config.resources.resourceValues != config.resources.resourceValues.sorted()) {
+            config.resources.resourceValues = config.resources.resourceValues.sorted()
+            logger.info {
+                "Load values are not sorted correctly, Theodolite sorts them in ascending order." +
+                        "New order is: ${config.resources.resourceValues}"
+            }
+        }
+
+        return Config(
+            loads = config.load.loadValues.map { load -> LoadDimension(load, loadDimensionPatcherDefinition) },
+            resources = config.resources.resourceValues.map { resource ->
+                Resource(
+                    resource,
+                    resourcePatcherDefinition
+                )
+            },
+            compositeStrategy = CompositeStrategy(
+                benchmarkExecutor = executor,
+                searchStrategy = strategyFactory.createSearchStrategy(executor, config.execution.strategy),
+                restrictionStrategies = strategyFactory.createRestrictionStrategy(
+                    results,
+                    config.execution.restrictions
+                )
+            )
+        )
+    }
+
+    fun getExecution(): BenchmarkExecution {
+        return this.config
+    }
+
+    /**
+     * Run all experiments which are specified in the corresponding
+     * execution and benchmark objects.
+     */
+    fun run() {
+        kubernetesBenchmark.setupInfrastructure()
+
+        val ioHandler = IOHandler()
+        val resultsFolder = ioHandler.getResultFolderURL()
+        this.config.executionId = getAndIncrementExecutionID(resultsFolder + "expID.txt")
+        ioHandler.writeToJSONFile(this.config, "${resultsFolder}exp${this.config.executionId}-execution-configuration")
+        ioHandler.writeToJSONFile(
+            kubernetesBenchmark,
+            "${resultsFolder}exp${this.config.executionId}-benchmark-configuration"
+        )
+
+        val config = buildConfig()
+        // execute benchmarks for each load
+        try {
+            for (load in config.loads) {
+                if (executor.run.get()) {
+                    config.compositeStrategy.findSuitableResource(load, config.resources)
+                }
+            }
+        } finally {
+            ioHandler.writeToJSONFile(
+                config.compositeStrategy.benchmarkExecutor.results,
+                "${resultsFolder}exp${this.config.executionId}-result"
+            )
+        }
+        kubernetesBenchmark.teardownInfrastructure()
+    }
+
+    private fun getAndIncrementExecutionID(fileURL: String): Int {
+        val ioHandler = IOHandler()
+        var executionID = 0
+        if (File(fileURL).exists()) {
+            executionID = ioHandler.readFileAsString(fileURL).toInt() + 1
+        }
+        ioHandler.writeStringToTextFile(fileURL, (executionID).toString())
+        return executionID
+    }
+
+}
diff --git a/theodolite/src/main/kotlin/theodolite/execution/TheodoliteStandalone.kt b/theodolite/src/main/kotlin/theodolite/execution/TheodoliteStandalone.kt
new file mode 100644
index 0000000000000000000000000000000000000000..1bbf3e01f461a19dbe588aedd41be63b84c86162
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/TheodoliteStandalone.kt
@@ -0,0 +1,66 @@
+package theodolite.execution
+
+import mu.KotlinLogging
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.benchmark.KubernetesBenchmark
+import theodolite.util.YamlParserFromFile
+import theodolite.util.EvaluationFailedException
+import theodolite.util.ExecutionFailedException
+import kotlin.concurrent.thread
+import kotlin.system.exitProcess
+
+private val logger = KotlinLogging.logger {}
+
+
+/**
+ * The Theodolite yaml executor loads the required configurations
+ * of the executions and the benchmark from yaml files and runs the
+ * corresponding experiments.
+ *
+ * The location of the execution, benchmarks and Kubernetes resource
+ * files can be configured via the following environment variables:
+ * `THEODOLITE_EXECUTION`
+ *
+ * `THEODOLITE_BENCHMARK`
+ *
+ * `THEODOLITE_APP_RESOURCES`
+ *
+ * @constructor Create empty Theodolite yaml executor
+ */
+class TheodoliteStandalone {
+    private val parser = YamlParserFromFile()
+
+    fun start() {
+        logger.info { "Theodolite started" }
+
+        val executionPath = System.getenv("THEODOLITE_EXECUTION") ?: "execution/execution.yaml"
+        val benchmarkPath = System.getenv("THEODOLITE_BENCHMARK") ?: "benchmark/benchmark.yaml"
+
+        logger.info { "Using $executionPath for BenchmarkExecution" }
+        logger.info { "Using $benchmarkPath for BenchmarkType" }
+
+
+        // load the BenchmarkExecution and the BenchmarkType
+        val benchmarkExecution =
+            parser.parse(path = executionPath, E = BenchmarkExecution::class.java)!!
+        val benchmark =
+            parser.parse(path = benchmarkPath, E = KubernetesBenchmark::class.java)!!
+
+        // Add shutdown hook
+        // Use thread{} with start = false, else the thread will start right away
+        val shutdown = thread(start = false) { Shutdown(benchmarkExecution, benchmark).run() }
+        Runtime.getRuntime().addShutdownHook(shutdown)
+
+        try {
+            TheodoliteExecutor(benchmarkExecution, benchmark).run()
+        } catch (e: EvaluationFailedException) {
+            logger.error { "Evaluation failed with error: ${e.message}" }
+        } catch (e: ExecutionFailedException) {
+            logger.error { "Execution failed with error: ${e.message}" }
+        }
+
+        logger.info { "Theodolite finished" }
+        Runtime.getRuntime().removeShutdownHook(shutdown)
+        exitProcess(0)
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/execution/operator/AbstractStateHandler.kt b/theodolite/src/main/kotlin/theodolite/execution/operator/AbstractStateHandler.kt
new file mode 100644
index 0000000000000000000000000000000000000000..0b5d6040bdea1316f8fb55bcc3f204c5443f6eee
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/operator/AbstractStateHandler.kt
@@ -0,0 +1,62 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.api.model.HasMetadata
+import io.fabric8.kubernetes.api.model.KubernetesResourceList
+import io.fabric8.kubernetes.api.model.Namespaced
+import io.fabric8.kubernetes.client.CustomResource
+import io.fabric8.kubernetes.client.KubernetesClientException
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.dsl.MixedOperation
+import io.fabric8.kubernetes.client.dsl.Resource
+import mu.KotlinLogging
+import java.lang.Thread.sleep
+private val logger = KotlinLogging.logger {}
+
+abstract class AbstractStateHandler<T, L, D>(
+    private val client: NamespacedKubernetesClient,
+    private val crd: Class<T>,
+    private val crdList: Class<L>
+) : StateHandler<T> where T : CustomResource<*, *>?, T : HasMetadata, T : Namespaced, L : KubernetesResourceList<T> {
+
+    private val crdClient: MixedOperation<T, L, Resource<T>> =
+        this.client.customResources(this.crd, this.crdList)
+
+    @Synchronized
+    override fun setState(resourceName: String, f: (T) -> T?) {
+        try {
+            this.crdClient
+                .list().items
+                .filter { it.metadata.name == resourceName }
+                .mapNotNull { customResource -> f(customResource) }
+                .forEach { this.crdClient.updateStatus(it) }
+        } catch (e: KubernetesClientException) {
+            logger.warn { "Status cannot be set for resource $resourceName" }
+        }
+    }
+
+    @Synchronized
+    override fun getState(resourceName: String, f: (T) -> String?): String? {
+        return this.crdClient
+            .list().items
+            .filter { it.metadata.name == resourceName }
+            .map { customResource -> f(customResource) }
+            .firstOrNull()
+    }
+
+    @Synchronized
+    override fun blockUntilStateIsSet(
+        resourceName: String,
+        desiredStatusString: String,
+        f: (T) -> String?,
+        maxTries: Int
+    ): Boolean {
+        for (i in 0.rangeTo(maxTries)) {
+            val currentStatus = getState(resourceName, f)
+            if (currentStatus == desiredStatusString) {
+                return true
+            }
+            sleep(50)
+        }
+        return false
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/execution/operator/BenchmarkStateHandler.kt b/theodolite/src/main/kotlin/theodolite/execution/operator/BenchmarkStateHandler.kt
new file mode 100644
index 0000000000000000000000000000000000000000..adca2a8b7fdb9b3e610f15e57c011679869df14c
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/operator/BenchmarkStateHandler.kt
@@ -0,0 +1,28 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import theodolite.model.crd.*
+
+class BenchmarkStateHandler(val client: NamespacedKubernetesClient) :
+    AbstractStateHandler<BenchmarkCRD, KubernetesBenchmarkList, ExecutionStatus>(
+        client = client,
+        crd = BenchmarkCRD::class.java,
+        crdList = KubernetesBenchmarkList::class.java
+    ) {
+
+    private fun getBenchmarkResourceState() = { cr: BenchmarkCRD -> cr.status.resourceSetsState }
+
+    fun setResourceSetState(resourceName: String, status: BenchmarkStates): Boolean {
+        setState(resourceName) { cr -> cr.status.resourceSetsState = status.value; cr }
+        return blockUntilStateIsSet(resourceName, status.value, getBenchmarkResourceState())
+    }
+
+    fun getResourceSetState(resourceName: String): ExecutionStates {
+        val status = this.getState(resourceName, getBenchmarkResourceState())
+        return if (status.isNullOrBlank()) {
+            ExecutionStates.NO_STATE
+        } else {
+            ExecutionStates.values().first { it.value == status }
+        }
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/execution/operator/ClusterSetup.kt b/theodolite/src/main/kotlin/theodolite/execution/operator/ClusterSetup.kt
new file mode 100644
index 0000000000000000000000000000000000000000..efca98f8bf72024daa0367c6c57574f0644872e4
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/operator/ClusterSetup.kt
@@ -0,0 +1,87 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.dsl.MixedOperation
+import io.fabric8.kubernetes.client.dsl.Resource
+import mu.KotlinLogging
+import theodolite.execution.Shutdown
+import theodolite.k8s.K8sContextFactory
+import theodolite.k8s.ResourceByLabelHandler
+import theodolite.model.crd.*
+
+private val logger = KotlinLogging.logger {}
+
+class ClusterSetup(
+    private val executionCRDClient: MixedOperation<ExecutionCRD, BenchmarkExecutionList, Resource<ExecutionCRD>>,
+    private val benchmarkCRDClient: MixedOperation<BenchmarkCRD, KubernetesBenchmarkList, Resource<BenchmarkCRD>>,
+    private val client: NamespacedKubernetesClient
+
+) {
+    private val serviceMonitorContext = K8sContextFactory().create(
+        api = "v1",
+        scope = "Namespaced",
+        group = "monitoring.coreos.com",
+        plural = "servicemonitors"
+    )
+
+    fun clearClusterState() {
+        stopRunningExecution()
+        clearByLabel()
+    }
+
+    /**
+     * Searches for executions in the cluster that are in the state running
+     * and tries to stop them. For this, the corresponding benchmark is
+     * looked up and the execution is shut down.
+     *
+     * Throws [IllegalStateException] if no suitable benchmark can be found.
+     */
+    private fun stopRunningExecution() {
+        executionCRDClient
+            .list()
+            .items
+            .asSequence()
+            .filter { it.status.executionState == ExecutionStates.RUNNING.value }
+            .forEach { execution ->
+                val benchmark = benchmarkCRDClient
+                    .inNamespace(client.namespace)
+                    .list()
+                    .items
+                    .firstOrNull { it.metadata.name == execution.spec.benchmark }
+
+                if (benchmark != null) {
+                    execution.spec.name = execution.metadata.name
+                    benchmark.spec.name = benchmark.metadata.name
+                    Shutdown(execution.spec, benchmark.spec).start()
+                } else {
+                    throw IllegalStateException("Execution with state ${ExecutionStates.RUNNING.value} was found, but no corresponding benchmark. " +
+                            "Could not initialize cluster.")
+                }
+            }
+    }
+
+    private fun clearByLabel() {
+        val resourceRemover = ResourceByLabelHandler(client = client)
+        resourceRemover.removeServices(
+            labelName = "app.kubernetes.io/created-by",
+            labelValue = "theodolite"
+        )
+        resourceRemover.removeDeployments(
+            labelName = "app.kubernetes.io/created-by",
+            labelValue = "theodolite"
+        )
+        resourceRemover.removeStatefulSets(
+            labelName = "app.kubernetes.io/created-by",
+            labelValue = "theodolite"
+        )
+        resourceRemover.removeConfigMaps(
+            labelName = "app.kubernetes.io/created-by",
+            labelValue = "theodolite"
+        )
+        resourceRemover.removeCR(
+            labelName = "app.kubernetes.io/created-by",
+            labelValue = "theodolite",
+            context = serviceMonitorContext
+        )
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/execution/operator/EventCreator.kt b/theodolite/src/main/kotlin/theodolite/execution/operator/EventCreator.kt
new file mode 100644
index 0000000000000000000000000000000000000000..fab098ebd5fe765a455d787ddb7fcbfbb6c9ffc7
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/operator/EventCreator.kt
@@ -0,0 +1,60 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.api.model.EventBuilder
+import io.fabric8.kubernetes.api.model.EventSource
+import io.fabric8.kubernetes.api.model.ObjectReference
+import io.fabric8.kubernetes.client.DefaultKubernetesClient
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import mu.KotlinLogging
+import theodolite.util.Configuration
+import java.time.Instant
+import java.util.*
+import kotlin.NoSuchElementException
+private val logger = KotlinLogging.logger {}
+
+class EventCreator {
+    val client: NamespacedKubernetesClient = DefaultKubernetesClient().inNamespace(Configuration.NAMESPACE)
+
+    fun createEvent(executionName: String, type: String, message: String, reason: String) {
+        val uuid = UUID.randomUUID().toString()
+        try {
+            val objectRef = buildObjectReference(executionName)
+            val event = EventBuilder()
+                .withNewMetadata()
+                .withName(uuid)
+                .endMetadata()
+                .withMessage(message)
+                .withReason(reason)
+                .withType(type)
+                .withFirstTimestamp(Instant.now().toString()) // TODO change datetime format
+                .build()
+
+            val source = EventSource()
+            source.component = Configuration.COMPONENT_NAME
+            event.source = source
+
+            event.involvedObject = objectRef
+            client.v1().events().inNamespace(Configuration.NAMESPACE).createOrReplace(event)
+        } catch (e: NoSuchElementException) {
+                logger.warn {"Could not create event: type: $type, message: $message, reason: $reason, no corresponding execution found."}
+        }
+    }
+
+    private fun buildObjectReference(executionName: String): ObjectReference {
+        val exec = TheodoliteOperator()
+            .getExecutionClient(client = client)
+            .list()
+            .items
+            .first { it.metadata.name == executionName }
+
+        val objectRef = ObjectReference()
+        objectRef.apiVersion = exec.apiVersion
+        objectRef.kind = exec.kind
+        objectRef.uid = exec.metadata.uid
+        objectRef.name = exec.metadata.name
+        objectRef.namespace = exec.metadata.namespace
+        objectRef.resourceVersion = exec.metadata.resourceVersion
+
+        return objectRef
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/execution/operator/ExecutionEventHandler.kt b/theodolite/src/main/kotlin/theodolite/execution/operator/ExecutionEventHandler.kt
new file mode 100644
index 0000000000000000000000000000000000000000..16c4ea98ba614bb3dcdd7d9f486f4e65ae70d380
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/operator/ExecutionEventHandler.kt
@@ -0,0 +1,88 @@
+package theodolite.execution.operator
+
+import com.google.gson.Gson
+import com.google.gson.GsonBuilder
+import io.fabric8.kubernetes.client.informers.ResourceEventHandler
+import mu.KotlinLogging
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.model.crd.*
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * Handles adding, updating and deleting BenchmarkExecutions.
+ *
+ * @param controller The TheodoliteController that handles the application state
+ *
+ * @see TheodoliteController
+ * @see BenchmarkExecution
+ */
+class ExecutionHandler(
+    private val controller: TheodoliteController,
+    private val stateHandler: ExecutionStateHandler
+) : ResourceEventHandler<ExecutionCRD> {
+    private val gson: Gson = GsonBuilder().enableComplexMapKeySerialization().create()
+
+    /**
+     * Add an execution to the end of the queue of the TheodoliteController.
+     *
+     * @param execution the execution to add
+     */
+    @Synchronized
+    override fun onAdd(execution: ExecutionCRD) {
+        logger.info { "Add execution ${execution.metadata.name}" }
+        execution.spec.name = execution.metadata.name
+        when (this.stateHandler.getExecutionState(execution.metadata.name)) {
+            ExecutionStates.NO_STATE -> this.stateHandler.setExecutionState(execution.spec.name, ExecutionStates.PENDING)
+            ExecutionStates.RUNNING -> {
+                this.stateHandler.setExecutionState(execution.spec.name, ExecutionStates.RESTART)
+                if (this.controller.isExecutionRunning(execution.spec.name)) {
+                    this.controller.stop(restart = true)
+                }
+            }
+        }
+    }
+
+    /**
+     * Updates an execution. If this execution is running at the time this function is called, it is stopped and
+     * added to the beginning of the queue of the TheodoliteController.
+     * Otherwise, it is just added to the beginning of the queue.
+     *
+     * @param oldExecution the old execution
+     * @param newExecution the new execution
+     */
+    @Synchronized
+    override fun onUpdate(oldExecution: ExecutionCRD, newExecution: ExecutionCRD) {
+        newExecution.spec.name = newExecution.metadata.name
+        oldExecution.spec.name = oldExecution.metadata.name
+        if (gson.toJson(oldExecution.spec) != gson.toJson(newExecution.spec)) {
+            logger.info { "Receive update event for execution ${oldExecution.metadata.name}" }
+            when (this.stateHandler.getExecutionState(newExecution.metadata.name)) {
+                ExecutionStates.RUNNING -> {
+                    this.stateHandler.setExecutionState(newExecution.spec.name, ExecutionStates.RESTART)
+                    if (this.controller.isExecutionRunning(newExecution.spec.name)) {
+                        this.controller.stop(restart = true)
+                    }
+                }
+                ExecutionStates.RESTART -> {
+                } // TODO: should this be set to pending?
+                else -> this.stateHandler.setExecutionState(newExecution.spec.name, ExecutionStates.PENDING)
+            }
+        }
+    }
+
+    /**
+     * Delete an execution from the queue of the TheodoliteController.
+     *
+     * @param execution the execution to delete
+     */
+    @Synchronized
+    override fun onDelete(execution: ExecutionCRD, b: Boolean) {
+        logger.info { "Delete execution ${execution.metadata.name}" }
+        if (execution.status.executionState == ExecutionStates.RUNNING.value
+            && this.controller.isExecutionRunning(execution.metadata.name)
+        ) {
+            this.controller.stop()
+        }
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/execution/operator/ExecutionStateHandler.kt b/theodolite/src/main/kotlin/theodolite/execution/operator/ExecutionStateHandler.kt
new file mode 100644
index 0000000000000000000000000000000000000000..9f49cf3ee4f9f62e7006dbf6697340e1af152f27
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/operator/ExecutionStateHandler.kt
@@ -0,0 +1,81 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import theodolite.model.crd.BenchmarkExecutionList
+import theodolite.model.crd.ExecutionCRD
+import theodolite.model.crd.ExecutionStatus
+import theodolite.model.crd.ExecutionStates
+import java.lang.Thread.sleep
+import java.time.Duration
+import java.time.Instant
+import java.util.concurrent.atomic.AtomicBoolean
+
+class ExecutionStateHandler(val client: NamespacedKubernetesClient) :
+    AbstractStateHandler<ExecutionCRD, BenchmarkExecutionList, ExecutionStatus>(
+        client = client,
+        crd = ExecutionCRD::class.java,
+        crdList = BenchmarkExecutionList::class.java
+    ) {
+
+    private var runExecutionDurationTimer: AtomicBoolean = AtomicBoolean(false)
+
+    private fun getExecutionLambda() = { cr: ExecutionCRD -> cr.status.executionState }
+
+    private fun getDurationLambda() = { cr: ExecutionCRD -> cr.status.executionDuration }
+
+    fun setExecutionState(resourceName: String, status: ExecutionStates): Boolean {
+        setState(resourceName) { cr -> cr.status.executionState = status.value; cr }
+        return blockUntilStateIsSet(resourceName, status.value, getExecutionLambda())
+    }
+
+    fun getExecutionState(resourceName: String): ExecutionStates {
+        val status = this.getState(resourceName, getExecutionLambda())
+        return if (status.isNullOrBlank()) {
+            ExecutionStates.NO_STATE
+        } else {
+            ExecutionStates.values().first { it.value == status }
+        }
+    }
+
+    fun setDurationState(resourceName: String, duration: Duration): Boolean {
+        setState(resourceName) { cr -> cr.status.executionDuration = durationToK8sString(duration); cr }
+        return blockUntilStateIsSet(resourceName, durationToK8sString(duration), getDurationLambda())
+    }
+
+    fun getDurationState(resourceName: String): String {
+        val status = getState(resourceName, getDurationLambda())
+        return if (status.isNullOrBlank()) {
+            "-"
+        } else {
+            status
+        }
+    }
+
+    private fun durationToK8sString(duration: Duration): String {
+        val sec = duration.seconds
+        return when {
+            sec <= 120 -> "${sec}s" // max 120s
+            sec < 60 * 99 -> "${duration.toMinutes()}m" // max 99m
+            sec < 60 * 60 * 99 -> "${duration.toHours()}h"   // max 99h
+            else -> "${duration.toDays()}d + ${duration.minusDays(duration.toDays()).toHours()}h"
+        }
+    }
+
+    fun startDurationStateTimer(resourceName: String) {
+        this.runExecutionDurationTimer.set(true)
+        val startTime = Instant.now().toEpochMilli()
+        Thread {
+            while (this.runExecutionDurationTimer.get()) {
+                val duration = Duration.ofMillis(Instant.now().minusMillis(startTime).toEpochMilli())
+                setDurationState(resourceName, duration)
+                sleep(100)
+            }
+        }.start()
+    }
+
+    @Synchronized
+    fun stopDurationStateTimer() {
+        this.runExecutionDurationTimer.set(false)
+        sleep(200)
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/execution/operator/LeaderElector.kt b/theodolite/src/main/kotlin/theodolite/execution/operator/LeaderElector.kt
new file mode 100644
index 0000000000000000000000000000000000000000..1ce94c2fdd1ce13d50a21e01b9d4692c87d0da6f
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/operator/LeaderElector.kt
@@ -0,0 +1,44 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.client.DefaultKubernetesClient
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.extended.leaderelection.LeaderCallbacks
+import io.fabric8.kubernetes.client.extended.leaderelection.LeaderElectionConfigBuilder
+import io.fabric8.kubernetes.client.extended.leaderelection.resourcelock.LeaseLock
+import mu.KotlinLogging
+import java.time.Duration
+import java.util.*
+import kotlin.reflect.KFunction0
+
+private val logger = KotlinLogging.logger {}
+
+class LeaderElector(
+    val client: NamespacedKubernetesClient,
+    val name: String
+) {
+
+    // TODO(what is the name of the lock? .withName() or LeaseLock(..,name..) ?)
+    fun getLeadership(leader: KFunction0<Unit>) {
+        val lockIdentity: String = UUID.randomUUID().toString()
+        DefaultKubernetesClient().use { kc ->
+            kc.leaderElector()
+                .withConfig(
+                    LeaderElectionConfigBuilder()
+                        .withName("Theodolite")
+                        .withLeaseDuration(Duration.ofSeconds(15L))
+                        .withLock(LeaseLock(client.namespace, name, lockIdentity))
+                        .withRenewDeadline(Duration.ofSeconds(10L))
+                        .withRetryPeriod(Duration.ofSeconds(2L))
+                        .withLeaderCallbacks(LeaderCallbacks(
+                            { Thread { leader() }.start() },
+                            { logger.info { "STOPPED LEADERSHIP" } }
+                        ) { newLeader: String? ->
+                            logger.info { "New leader elected $newLeader" }
+                        })
+                        .build()
+                )
+                .build().run()
+        }
+    }
+
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/execution/operator/StateHandler.kt b/theodolite/src/main/kotlin/theodolite/execution/operator/StateHandler.kt
new file mode 100644
index 0000000000000000000000000000000000000000..e2cfaa354443cdc940abf92ef2c7474d028daecf
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/operator/StateHandler.kt
@@ -0,0 +1,15 @@
+package theodolite.execution.operator
+
+private const val MAX_TRIES: Int = 5
+
+interface StateHandler<T> {
+    fun setState(resourceName: String, f: (T) -> T?)
+    fun getState(resourceName: String, f: (T) -> String?): String?
+    fun blockUntilStateIsSet(
+        resourceName: String,
+        desiredStatusString: String,
+        f: (T) -> String?,
+        maxTries: Int = MAX_TRIES
+    ): Boolean
+
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/execution/operator/TheodoliteController.kt b/theodolite/src/main/kotlin/theodolite/execution/operator/TheodoliteController.kt
new file mode 100644
index 0000000000000000000000000000000000000000..70e30cf84ef40796eb085a0d68eb2e323232fde9
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/operator/TheodoliteController.kt
@@ -0,0 +1,209 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.client.dsl.MixedOperation
+import io.fabric8.kubernetes.client.dsl.Resource
+import mu.KotlinLogging
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.benchmark.KubernetesBenchmark
+import theodolite.execution.TheodoliteExecutor
+import theodolite.model.crd.*
+import theodolite.patcher.ConfigOverrideModifier
+import theodolite.util.ExecutionStateComparator
+import java.lang.Thread.sleep
+
+private val logger = KotlinLogging.logger {}
+const val DEPLOYED_FOR_EXECUTION_LABEL_NAME = "deployed-for-execution"
+const val DEPLOYED_FOR_BENCHMARK_LABEL_NAME = "deployed-for-benchmark"
+const val CREATED_BY_LABEL_NAME = "app.kubernetes.io/created-by"
+const val CREATED_BY_LABEL_VALUE = "theodolite"
+
+/**
+ * The controller implementation for Theodolite.
+ *
+ * @see BenchmarkExecution
+ * @see KubernetesBenchmark
+ */
+class TheodoliteController(
+    private val executionCRDClient: MixedOperation<ExecutionCRD, BenchmarkExecutionList, Resource<ExecutionCRD>>,
+    private val benchmarkCRDClient: MixedOperation<BenchmarkCRD, KubernetesBenchmarkList, Resource<BenchmarkCRD>>,
+    private val executionStateHandler: ExecutionStateHandler,
+    private val benchmarkStateHandler: BenchmarkStateHandler
+) {
+    lateinit var executor: TheodoliteExecutor
+
+    /**
+     * Runs the TheodoliteController forever.
+     */
+    fun run() {
+        sleep(5000) // wait until all states are correctly set
+        while (true) {
+            reconcile()
+            updateBenchmarkStatus()
+            sleep(2000)
+        }
+    }
+
+    private fun reconcile() {
+        do {
+            val execution = getNextExecution()
+            updateBenchmarkStatus()
+            if (execution != null) {
+                val benchmark = getBenchmarks()
+                    .map { it.spec }
+                    .firstOrNull { it.name == execution.benchmark }
+                if (benchmark != null) {
+                    runExecution(execution, benchmark)
+                }
+            } else {
+                logger.info { "Could not find executable execution." }
+            }
+        } while (execution != null)
+    }
+
+    /**
+     * Executes a benchmark as defined by a [KubernetesBenchmark] and a [BenchmarkExecution].
+     *
+     * @see BenchmarkExecution
+     */
+    private fun runExecution(execution: BenchmarkExecution, benchmark: KubernetesBenchmark) {
+        try {
+            val modifier = ConfigOverrideModifier(
+                execution = execution,
+                resources = benchmark.loadKubernetesResources(benchmark.sut.resources).map { it.first }
+                        + benchmark.loadKubernetesResources(benchmark.loadGenerator.resources).map { it.first }
+            )
+            modifier.setAdditionalLabels(
+                labelValue = execution.name,
+                labelName = DEPLOYED_FOR_EXECUTION_LABEL_NAME
+            )
+            modifier.setAdditionalLabels(
+                labelValue = benchmark.name,
+                labelName = DEPLOYED_FOR_BENCHMARK_LABEL_NAME
+            )
+            modifier.setAdditionalLabels(
+                labelValue = CREATED_BY_LABEL_VALUE,
+                labelName = CREATED_BY_LABEL_NAME
+            )
+
+            executionStateHandler.setExecutionState(execution.name, ExecutionStates.RUNNING)
+            executionStateHandler.startDurationStateTimer(execution.name)
+
+            executor = TheodoliteExecutor(execution, benchmark)
+            executor.run()
+            when (executionStateHandler.getExecutionState(execution.name)) {
+                ExecutionStates.RESTART -> runExecution(execution, benchmark)
+                ExecutionStates.RUNNING -> {
+                    executionStateHandler.setExecutionState(execution.name, ExecutionStates.FINISHED)
+                    logger.info { "Execution ${execution.name} has finished." }
+                }
+                else -> {
+                    executionStateHandler.setExecutionState(execution.name, ExecutionStates.FAILURE)
+                    logger.warn { "Unexpected execution state, setting state to ${ExecutionStates.FAILURE.value}." }
+                }
+            }
+        } catch (e: Exception) {
+            EventCreator().createEvent(
+                executionName = execution.name,
+                type = "WARNING",
+                reason = "Execution failed",
+                message = "An error occurred while executing: ${e.message}"
+            )
+            logger.error { "Failure while executing execution ${execution.name} with benchmark ${benchmark.name}." }
+            logger.error { "Reason: $e" }
+            executionStateHandler.setExecutionState(execution.name, ExecutionStates.FAILURE)
+        }
+        executionStateHandler.stopDurationStateTimer()
+    }
+
+    @Synchronized
+    fun stop(restart: Boolean = false) {
+        if (!::executor.isInitialized) return
+        if (restart) {
+            executionStateHandler.setExecutionState(this.executor.getExecution().name, ExecutionStates.RESTART)
+        }
+        this.executor.executor.run.set(false)
+    }
+
+    /**
+     * @return all available [BenchmarkCRD]s
+     */
+    private fun getBenchmarks(): List<BenchmarkCRD> {
+        return this.benchmarkCRDClient
+            .list()
+            .items
+            .map {
+                it.spec.name = it.metadata.name
+                it
+            }
+    }
+
+    /**
+     * Gets the [BenchmarkExecution] for the next run. Which [BenchmarkExecution]
+     * is selected depends on three criteria:
+     *
+     * 1. Only executions are considered for which a matching benchmark is available on the cluster.
+     * 2. The state of the execution must be [ExecutionStates.PENDING] or [ExecutionStates.RESTART].
+     * 3. Of the remaining executions, those in state [ExecutionStates.RESTART] are preferred;
+     * if there is more than one, the oldest execution is chosen.
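+     *
+     * For example, an execution in state Restart is chosen over an older one in state
+     * Pending, because the state comparison takes precedence over the creation timestamp.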
+     *
+     * @return the next execution or null
+     */
+    private fun getNextExecution(): BenchmarkExecution? {
+        val comparator = ExecutionStateComparator(ExecutionStates.RESTART)
+        val availableBenchmarkNames = getBenchmarks()
+            .filter { it.status.resourceSetsState == BenchmarkStates.READY.value }
+            .map { it.spec }
+            .map { it.name }
+
+        return executionCRDClient
+            .list()
+            .items
+            .asSequence()
+            .map { it.spec.name = it.metadata.name; it }
+            .filter {
+                it.status.executionState == ExecutionStates.PENDING.value ||
+                        it.status.executionState == ExecutionStates.RESTART.value
+            }
+            .filter { availableBenchmarkNames.contains(it.spec.benchmark) }
+            .sortedWith(comparator.thenBy { it.metadata.creationTimestamp })
+            .map { it.spec }
+            .firstOrNull()
+    }
+
+    private fun updateBenchmarkStatus() {
+        this.benchmarkCRDClient
+            .list()
+            .items
+            .map { it.spec.name = it.metadata.name; it }
+            .map { Pair(it, checkResource(it.spec)) }
+            .forEach { setState(it.first, it.second) }
+    }
+
+    private fun setState(resource: BenchmarkCRD, state: BenchmarkStates) {
+        benchmarkStateHandler.setResourceSetState(resource.spec.name, state)
+    }
+
+    private fun checkResource(benchmark: KubernetesBenchmark): BenchmarkStates {
+        return try {
+            val appResources =
+                benchmark.loadKubernetesResources(resourceSet = benchmark.sut.resources)
+            val loadGenResources =
+                benchmark.loadKubernetesResources(resourceSet = benchmark.loadGenerator.resources)
+            if(appResources.isNotEmpty() && loadGenResources.isNotEmpty()) {
+                BenchmarkStates.READY
+            } else {
+                BenchmarkStates.PENDING
+            }
+        } catch (e: Exception) {
+            BenchmarkStates.PENDING
+        }
+    }
+
+    fun isExecutionRunning(executionName: String): Boolean {
+        if (!::executor.isInitialized) return false
+        return this.executor.getExecution().name == executionName
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/execution/operator/TheodoliteOperator.kt b/theodolite/src/main/kotlin/theodolite/execution/operator/TheodoliteOperator.kt
new file mode 100644
index 0000000000000000000000000000000000000000..4850a44fdddba117178e29d3170f44a95df646e7
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/execution/operator/TheodoliteOperator.kt
@@ -0,0 +1,150 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.client.DefaultKubernetesClient
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.dsl.MixedOperation
+import io.fabric8.kubernetes.client.dsl.Resource
+import io.fabric8.kubernetes.client.informers.SharedInformerFactory
+import io.fabric8.kubernetes.internal.KubernetesDeserializer
+import mu.KotlinLogging
+import theodolite.model.crd.BenchmarkCRD
+import theodolite.model.crd.BenchmarkExecutionList
+import theodolite.model.crd.ExecutionCRD
+import theodolite.model.crd.KubernetesBenchmarkList
+import theodolite.util.Configuration
+
+
+private const val DEFAULT_NAMESPACE = "default"
+private const val EXECUTION_SINGULAR = "execution"
+private const val BENCHMARK_SINGULAR = "benchmark"
+private const val API_VERSION = "v1"
+private const val RESYNC_PERIOD = 10 * 60 * 1000L
+private const val GROUP = "theodolite.com"
+private val logger = KotlinLogging.logger {}
+
+/**
+ * Implementation of the Operator pattern for K8s.
+ *
+ * **See Also:** [Kubernetes Operator Pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/)
+ */
+class TheodoliteOperator {
+    private val namespace = Configuration.NAMESPACE
+
+    private val client: NamespacedKubernetesClient = DefaultKubernetesClient().inNamespace(namespace)
+    private lateinit var controller: TheodoliteController
+    private lateinit var executionStateHandler: ExecutionStateHandler
+    private lateinit var benchmarkStateHandler: BenchmarkStateHandler
+
+    fun start() {
+        LeaderElector(
+            client = client,
+            name = Configuration.COMPONENT_NAME
+        )
+            .getLeadership(::startOperator)
+    }
+
+    /**
+     * Start the operator.
+     */
+    private fun startOperator() {
+        logger.info { "Using $namespace as namespace." }
+        client.use {
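+            // Register the custom resource kinds so fabric8 can deserialize them.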
+            KubernetesDeserializer.registerCustomKind(
+                "$GROUP/$API_VERSION",
+                EXECUTION_SINGULAR,
+                ExecutionCRD::class.java
+            )
+
+            KubernetesDeserializer.registerCustomKind(
+                "$GROUP/$API_VERSION",
+                BENCHMARK_SINGULAR,
+                BenchmarkCRD::class.java
+            )
+
+            ClusterSetup(
+                executionCRDClient = getExecutionClient(client),
+                benchmarkCRDClient = getBenchmarkClient(client),
+                client = client
+            ).clearClusterState()
+
+            controller = getController(
+                client = client,
+                executionStateHandler = getExecutionStateHandler(client = client),
+                benchmarkStateHandler = getBenchmarkStateHandler(client = client)
+            )
+            getExecutionEventHandler(controller, client).startAllRegisteredInformers()
+            controller.run()
+        }
+    }
+
+    fun getExecutionEventHandler(
+        controller: TheodoliteController,
+        client: NamespacedKubernetesClient
+    ): SharedInformerFactory {
+        val factory = client.informers()
+            .inNamespace(client.namespace)
+
+        factory.sharedIndexInformerForCustomResource(
+            ExecutionCRD::class.java,
+            RESYNC_PERIOD
+        ).addEventHandler(
+            ExecutionHandler(
+                controller = controller,
+                stateHandler = ExecutionStateHandler(client)
+            )
+        )
+        return factory
+    }
+
+    fun getExecutionStateHandler(client: NamespacedKubernetesClient): ExecutionStateHandler {
+        if (!::executionStateHandler.isInitialized) {
+            this.executionStateHandler = ExecutionStateHandler(client = client)
+        }
+        return executionStateHandler
+    }
+
+    fun getBenchmarkStateHandler(client: NamespacedKubernetesClient): BenchmarkStateHandler {
+        if (!::benchmarkStateHandler.isInitialized) {
+            this.benchmarkStateHandler = BenchmarkStateHandler(client = client)
+        }
+        return benchmarkStateHandler
+    }
+
+    fun getController(
+        client: NamespacedKubernetesClient,
+        executionStateHandler: ExecutionStateHandler,
+        benchmarkStateHandler: BenchmarkStateHandler
+    ): TheodoliteController {
+        if (!::controller.isInitialized) {
+            this.controller = TheodoliteController(
+                benchmarkCRDClient = getBenchmarkClient(client),
+                executionCRDClient = getExecutionClient(client),
+                executionStateHandler = executionStateHandler,
+                benchmarkStateHandler = benchmarkStateHandler
+            )
+        }
+        return this.controller
+    }
+
+    fun getExecutionClient(client: NamespacedKubernetesClient): MixedOperation<
+            ExecutionCRD,
+            BenchmarkExecutionList,
+            Resource<ExecutionCRD>> {
+        return client.customResources(
+            ExecutionCRD::class.java,
+            BenchmarkExecutionList::class.java
+        )
+    }
+
+    fun getBenchmarkClient(client: NamespacedKubernetesClient): MixedOperation<
+            BenchmarkCRD,
+            KubernetesBenchmarkList,
+            Resource<BenchmarkCRD>> {
+        return client.customResources(
+            BenchmarkCRD::class.java,
+            KubernetesBenchmarkList::class.java
+        )
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/k8s/CustomResourceWrapper.kt b/theodolite/src/main/kotlin/theodolite/k8s/CustomResourceWrapper.kt
new file mode 100644
index 0000000000000000000000000000000000000000..797ed88389947d66aa626ba2ef3fdf6732f8369d
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/k8s/CustomResourceWrapper.kt
@@ -0,0 +1,47 @@
+package theodolite.k8s
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext
+import mu.KotlinLogging
+
+private val logger = KotlinLogging.logger {}
+
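+/**
+ * Wraps a custom resource, represented as a map of its YAML fields, together with the
+ * [CustomResourceDefinitionContext] required to deploy or delete it.
+ */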
+class CustomResourceWrapper(
+    val crAsMap: Map<String, String>,
+    private val context: CustomResourceDefinitionContext
+) : KubernetesResource {
+    /**
+     * Deploys the wrapped custom resource.
+     *
+     * @param client a namespaced Kubernetes client which is used to deploy the CR object.
+     *
+     * @throws java.io.IOException if the resource could not be deployed.
+     */
+    fun deploy(client: NamespacedKubernetesClient) {
+        client.customResource(this.context)
+            .createOrReplace(client.configuration.namespace, this.crAsMap as Map<String, Any>)
+    }
+
+    /**
+     * Deletes the wrapped custom resource.
+     *
+     * @param client a namespaced Kubernetes client which is used to delete the CR object.
+     */
+    fun delete(client: NamespacedKubernetesClient) {
+        try {
+            client.customResource(this.context)
+                .delete(client.configuration.namespace, this.getName())
+        } catch (e: Exception) {
+            logger.warn { "Could not delete custom resource" }
+        }
+    }
+
+    /**
+     * @throws NullPointerException if name or metadata is null
+     */
+    fun getName(): String {
+        val metadataAsMap = this.crAsMap["metadata"]!! as Map<String, String>
+        return metadataAsMap["name"]!!
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/k8s/K8sContextFactory.kt b/theodolite/src/main/kotlin/theodolite/k8s/K8sContextFactory.kt
new file mode 100644
index 0000000000000000000000000000000000000000..7eb209bfbab02bb94d34c985aa308173e509d4e4
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/k8s/K8sContextFactory.kt
@@ -0,0 +1,32 @@
+package theodolite.k8s
+
+import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext
+
+/**
+ * Factory for CustomResourceDefinitionContext
+ *
+ * @see CustomResourceDefinitionContext
+ */
+class K8sContextFactory {
+
+    /**
+     * Create a CustomResourceDefinitionContext.
+     *
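+     * For example, `create("v1", "Namespaced", "theodolite.com", "executions")` yields the
+     * context used for Theodolite execution resources.
+     *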
+     * @param api The K8s API version
+     * @param scope The scope of the CRD
+     * @param group The group of the CRD
+     * @param plural The plural name of the CRD
+     *
+     * @return a new CustomResourceDefinitionContext
+     *
+     * @see CustomResourceDefinitionContext
+     */
+    fun create(api: String, scope: String, group: String, plural: String): CustomResourceDefinitionContext {
+        return CustomResourceDefinitionContext.Builder()
+            .withVersion(api)
+            .withScope(scope)
+            .withGroup(group)
+            .withPlural(plural)
+            .build()
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/k8s/K8sManager.kt b/theodolite/src/main/kotlin/theodolite/k8s/K8sManager.kt
new file mode 100644
index 0000000000000000000000000000000000000000..389d5eefad556df502c218862e2f253ef8ad2100
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/k8s/K8sManager.kt
@@ -0,0 +1,69 @@
+package theodolite.k8s
+
+import io.fabric8.kubernetes.api.model.ConfigMap
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Service
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import mu.KotlinLogging
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * This class is used to deploy or remove different Kubernetes resources.
+ * Supports: Deployments, Services, ConfigMaps, StatefulSets, and CustomResources.
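+ *
+ * For example, `K8sManager(client).deploy(deployment)` creates or replaces the given
+ * deployment, while `remove(deployment)` deletes it and waits until its pods are gone.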
+ * @param client KubernetesClient used to deploy or remove.
+ */
+class K8sManager(private val client: NamespacedKubernetesClient) {
+
+    /**
+     * Deploys different k8s resources using the client.
+     * @throws IllegalArgumentException if KubernetesResource not supported.
+     */
+    fun deploy(resource: KubernetesResource) {
+        when (resource) {
+            is Deployment ->
+                this.client.apps().deployments().createOrReplace(resource)
+            is Service ->
+                this.client.services().createOrReplace(resource)
+            is ConfigMap ->
+                this.client.configMaps().createOrReplace(resource)
+            is StatefulSet ->
+                this.client.apps().statefulSets().createOrReplace(resource)
+            is CustomResourceWrapper -> resource.deploy(client)
+            else -> throw IllegalArgumentException("Unknown Kubernetes resource.")
+        }
+    }
+
+    /**
+     * Removes different k8s resources using the client.
+     * @throws IllegalArgumentException if KubernetesResource not supported.
+     */
+    fun remove(resource: KubernetesResource) {
+        when (resource) {
+            is Deployment -> {
+                this.client.apps().deployments().delete(resource)
+                ResourceByLabelHandler(client = client)
+                    .blockUntilPodsDeleted(
+                        matchLabels = resource.spec.selector.matchLabels
+                    )
+                logger.info { "Deployment '${resource.metadata.name}' deleted." }
+            }
+            is Service ->
+                this.client.services().delete(resource)
+            is ConfigMap ->
+                this.client.configMaps().delete(resource)
+            is StatefulSet -> {
+                this.client.apps().statefulSets().delete(resource)
+                ResourceByLabelHandler(client = client)
+                    .blockUntilPodsDeleted(
+                        matchLabels = resource.spec.selector.matchLabels
+                    )
+                logger.info { "StatefulSet '$resource.metadata.name' deleted." }
+            }
+            is CustomResourceWrapper -> resource.delete(client)
+            else -> throw IllegalArgumentException("Unknown Kubernetes resource.")
+        }
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/k8s/ResourceByLabelHandler.kt b/theodolite/src/main/kotlin/theodolite/k8s/ResourceByLabelHandler.kt
new file mode 100644
index 0000000000000000000000000000000000000000..28a72c8947bffe7b57203cacf2460d7080fa7b51
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/k8s/ResourceByLabelHandler.kt
@@ -0,0 +1,115 @@
+package theodolite.k8s
+
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext
+import mu.KotlinLogging
+import org.json.JSONObject
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * The ResourceByLabelHandler provides basic functions to manage Kubernetes resources through their labels.
+ * @param client NamespacedKubernetesClient used to manage the resources.
+ */
+class ResourceByLabelHandler(private val client: NamespacedKubernetesClient) {
+
+    /**
+     * Deletes all pods with the selected label.
+     * @param [labelName] the label name
+     * @param [labelValue] the value of this label
+     */
+    fun removePods(labelName: String, labelValue: String) {
+        this.client
+            .pods()
+            .withLabel("$labelName=$labelValue").delete()
+        logger.info { "Pod with label: $labelName=$labelValue deleted" }
+    }
+
+    /**
+     * Deletes all services with the selected label.
+     * @param [labelName] the label name
+     * @param [labelValue] the value of this label
+     */
+    fun removeServices(labelName: String, labelValue: String) {
+        this.client
+            .services()
+            .withLabel("$labelName=$labelValue")
+            .delete()
+    }
+
+    /**
+     * Deletes all deployments with the selected label.
+     * @param [labelName] the label name
+     * @param [labelValue] the value of this label
+     */
+    fun removeDeployments(labelName: String, labelValue: String) {
+        this.client
+            .apps()
+            .deployments()
+            .withLabel("$labelName=$labelValue")
+            .delete()
+    }
+
+    /**
+     * Deletes all stateful sets with the selected label.
+     * @param [labelName] the label name
+     * @param [labelValue] the value of this label
+     */
+    fun removeStatefulSets(labelName: String, labelValue: String) {
+        this.client
+            .apps()
+            .statefulSets()
+            .withLabel("$labelName=$labelValue")
+            .delete()
+    }
+
+    /**
+     * Deletes all configmaps with the selected label.
+     * @param [labelName] the label name
+     * @param [labelValue] the value of this label
+     */
+    fun removeConfigMaps(labelName: String, labelValue: String) {
+        this.client
+            .configMaps()
+            .withLabel("$labelName=$labelValue")
+            .delete()
+    }
+
+    /**
+     * Deletes all custom resources with the selected label.
+     * @param [labelName] the label name
+     * @param [labelValue] the value of this label
+     * @param [context] the CRD context of the custom resources to delete
+     */
+    fun removeCR(labelName: String, labelValue: String, context: CustomResourceDefinitionContext) {
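+        // The raw custom resource API returns the list as a map; convert it to JSON to extract the resource names.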
+        val customResources = JSONObject(
+            this.client.customResource(context)
+                .list(client.namespace, mapOf(Pair(labelName, labelValue)))
+        )
+            .getJSONArray("items")
+
+        (0 until customResources.length())
+            .map { customResources.getJSONObject(it).getJSONObject("metadata").getString("name") }
+            .forEach { this.client.customResource(context).delete(client.namespace, it) }
+    }
+
+    /**
+     * Blocks until all pods matching the given labels are deleted.
+     *
+     * @param [matchLabels] the labels the pods must match
+     */
+    fun blockUntilPodsDeleted(matchLabels: MutableMap<String, String>) {
+        while (
+            !this.client
+                .pods()
+                .withLabels(matchLabels)
+                .list()
+                .items
+                .isNullOrEmpty()
+        ) {
+            logger.info { "Wait for pods with label ${matchLabels.toString()} to be deleted." }
+            Thread.sleep(1000)
+        }
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/k8s/TopicManager.kt b/theodolite/src/main/kotlin/theodolite/k8s/TopicManager.kt
new file mode 100644
index 0000000000000000000000000000000000000000..f2afd71f6e4b4cf8e7106a8fc8a9bd113d9f36e6
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/k8s/TopicManager.kt
@@ -0,0 +1,111 @@
+package theodolite.k8s
+
+import mu.KotlinLogging
+import org.apache.kafka.clients.admin.AdminClient
+import org.apache.kafka.clients.admin.CreateTopicsResult
+import org.apache.kafka.clients.admin.NewTopic
+import java.lang.Thread.sleep
+
+private val logger = KotlinLogging.logger {}
+private const val RETRY_TIME = 2000L
+
+/**
+ * Manages Kafka topic-related tasks. An [AdminClient] is created from the given
+ * configuration for each operation.
+ * @param kafkaConfig Kafka configuration as a map
+ */
+class TopicManager(private val kafkaConfig: Map<String, Any>) {
+
+    /**
+     * Create topics.
+     * @param newTopics Collection of all topics that should be created
+     */
+    fun createTopics(newTopics: Collection<NewTopic>) {
+        val kafkaAdmin: AdminClient = AdminClient.create(this.kafkaConfig)
+        lateinit var result: CreateTopicsResult
+
+        do {
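+            // Try to create the topics; on failure, the conflicting existing topics are deleted and the creation is retried.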
+            var retryCreation = false
+            try {
+                result = kafkaAdmin.createTopics(newTopics)
+                result.all().get() // wait for the future to be completed
+            } catch (e: Exception) { // TopicExistsException
+                logger.warn { "Error during topic creation. Error is: ${e.message}" }
+                logger.info { "Remove existing topics." }
+                delete(newTopics.map { topic -> topic.name() }, kafkaAdmin)
+                logger.info { "Will retry the topic creation in ${RETRY_TIME / 1000} seconds." }
+                sleep(RETRY_TIME)
+                retryCreation = true
+            }
+        } while (retryCreation)
+
+        logger.info {
+            "Topics creation finished with result: ${
+                result
+                    .values()
+                    .map { it.key + ": " + it.value.isDone }
+                    .joinToString(separator = ",")
+            } "
+        }
+        kafkaAdmin.close()
+    }
+
+    /**
+     * Remove topics.
+     * @param topics Collection of names for the topics to remove.
+     */
+    fun removeTopics(topics: List<String>) {
+        val kafkaAdmin: AdminClient = AdminClient.create(this.kafkaConfig)
+        val currentTopics = kafkaAdmin.listTopics().names().get()
+        delete(currentTopics.filter { matchRegex(it, topics) }, kafkaAdmin)
+        kafkaAdmin.close()
+    }
+
+    /**
+     * Checks whether `existingTopic` matches any of the regular expressions in `topics`.
+     * For example, the pattern `theodolite-.*` matches the topic `theodolite-input`.
+     *
+     * @param existingTopic the topic name to check.
+     * @param topics list of regular expressions to match against.
+     * @return true if `existingTopic` matches one of the expressions, else false.
+     */
+    private fun matchRegex(existingTopic: String, topics: List<String>): Boolean {
+        for (t in topics) {
+            val regex = t.toRegex()
+            if (regex.matches(existingTopic)) {
+                return true
+            }
+        }
+        return false
+    }
+
+    private fun delete(topics: List<String>, kafkaAdmin: AdminClient) {
+        var deleted = false
+
+        while (!deleted) {
+            try {
+                val result = kafkaAdmin.deleteTopics(topics)
+                result.all().get() // wait for the future to be completed
+                logger.info {
+                    "Topics deletion finished with result: ${
+                        result.values().map { it.key + ": " + it.value.isDone }
+                            .joinToString(separator = ",")
+                    }"
+                }
+            } catch (e: Exception) {
+                logger.error { "Error while removing topics: ${e.message}" }
+                logger.info { "Existing topics are: ${kafkaAdmin.listTopics().names().get()}." }
+            }
+
+            val toDelete = topics.filter { kafkaAdmin.listTopics().names().get().contains(it) }
+
+            if (toDelete.isEmpty()) {
+                deleted = true
+            } else {
+                logger.info { "Deletion of Kafka topics failed, will retry in ${RETRY_TIME / 1000} seconds." }
+                sleep(RETRY_TIME)
+            }
+        }
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/k8s/resourceLoader/AbstractK8sLoader.kt b/theodolite/src/main/kotlin/theodolite/k8s/resourceLoader/AbstractK8sLoader.kt
new file mode 100644
index 0000000000000000000000000000000000000000..862de14e2a7a4721e15215b0a1389e14f943fe24
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/k8s/resourceLoader/AbstractK8sLoader.kt
@@ -0,0 +1,73 @@
+package theodolite.k8s.resourceLoader
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import mu.KotlinLogging
+import theodolite.k8s.K8sContextFactory
+
+private val logger = KotlinLogging.logger {}
+
+abstract class AbstractK8sLoader : K8sResourceLoader {
+
+    fun loadK8sResource(kind: String, resourceString: String): KubernetesResource {
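+        // Normalize the first letter to upper case so that, e.g., "deployment" and "Deployment" both match.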
+        return when (kind.replaceFirst(kind[0], kind[0].toUpperCase())) {
+            "Deployment" -> loadDeployment(resourceString)
+            "Service" -> loadService(resourceString)
+            "ServiceMonitor" -> loadServiceMonitor(resourceString)
+            "ConfigMap" -> loadConfigmap(resourceString)
+            "StatefulSet" -> loadStatefulSet(resourceString)
+            "Execution" -> loadExecution(resourceString)
+            "Benchmark" -> loadBenchmark(resourceString)
+            else -> {
+                logger.error { "Unsupported resource kind: $kind." }
+                throw IllegalArgumentException("Could not load resource with kind: $kind")
+            }
+        }
+    }
+
+    fun <T> loadGenericResource(resourceString: String, f: (String) -> T): T {
+        var resource: T? = null
+
+        try {
+            resource = f(resourceString)
+        } catch (e: Exception) {
+            logger.warn(e) { "Failed to parse the given resource." }
+        }
+
+        if (resource == null) {
+            throw IllegalArgumentException("The Resource: $resourceString could not be loaded")
+        }
+        return resource
+    }
+
+    override fun loadServiceMonitor(resource: String): KubernetesResource {
+        val context = K8sContextFactory().create(
+            api = "v1",
+            scope = "Namespaced",
+            group = "monitoring.coreos.com",
+            plural = "servicemonitors"
+        )
+        return loadCustomResourceWrapper(resource, context)
+    }
+
+    override fun loadExecution(resource: String): KubernetesResource {
+        val context = K8sContextFactory().create(
+            api = "v1",
+            scope = "Namespaced",
+            group = "theodolite.com",
+            plural = "executions"
+        )
+        return loadCustomResourceWrapper(resource, context)
+    }
+
+    override fun loadBenchmark(resource: String): KubernetesResource {
+        val context = K8sContextFactory().create(
+            api = "v1",
+            scope = "Namespaced",
+            group = "theodolite.com",
+            plural = "benchmarks"
+        )
+        return loadCustomResourceWrapper(resource, context)
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/k8s/resourceLoader/K8sResourceLoader.kt b/theodolite/src/main/kotlin/theodolite/k8s/resourceLoader/K8sResourceLoader.kt
new file mode 100644
index 0000000000000000000000000000000000000000..c123ab2958132cb43ad188136f738b561e91310b
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/k8s/resourceLoader/K8sResourceLoader.kt
@@ -0,0 +1,15 @@
+package theodolite.k8s.resourceLoader
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext
+
+interface K8sResourceLoader {
+    fun loadDeployment(resource: String): KubernetesResource
+    fun loadService(resource: String): KubernetesResource
+    fun loadStatefulSet(resource: String): KubernetesResource
+    fun loadExecution(resource: String): KubernetesResource
+    fun loadBenchmark(resource: String): KubernetesResource
+    fun loadConfigmap(resource: String): KubernetesResource
+    fun loadServiceMonitor(resource: String): KubernetesResource
+    fun loadCustomResourceWrapper(resource: String, context: CustomResourceDefinitionContext): KubernetesResource
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/k8s/resourceLoader/K8sResourceLoaderFromFile.kt b/theodolite/src/main/kotlin/theodolite/k8s/resourceLoader/K8sResourceLoaderFromFile.kt
new file mode 100644
index 0000000000000000000000000000000000000000..08f34e1d67c9821c9f9a07a49f4ba8683a072611
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/k8s/resourceLoader/K8sResourceLoaderFromFile.kt
@@ -0,0 +1,75 @@
+package theodolite.k8s.resourceLoader
+
+import io.fabric8.kubernetes.api.model.ConfigMap
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Service
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext
+import theodolite.k8s.CustomResourceWrapper
+import theodolite.util.YamlParserFromFile
+
+/**
+ * Used to load different Kubernetes resources from YAML files.
+ * Supports: Deployments, Services, ConfigMaps, StatefulSets, and CustomResources.
+ * @param client KubernetesClient used to load the resources.
+ */
+class K8sResourceLoaderFromFile(private val client: NamespacedKubernetesClient): AbstractK8sLoader(),
+    K8sResourceLoader {
+
+    /**
+     * Parses a Service from a service YAML file.
+     * @param resource path of the YAML file
+     * @return Service from fabric8
+     */
+    override fun loadService(resource: String): Service {
+        return loadGenericResource(resource) { x: String -> client.services().load(x).get() }
+    }
+
+    /**
+     * Parses a CustomResource from a YAML file.
+     * @param resource path of the YAML file
+     * @param context specific CRD context for this custom resource
+     * @return CustomResourceWrapper from fabric8
+     */
+    override fun loadCustomResourceWrapper(resource: String, context: CustomResourceDefinitionContext): CustomResourceWrapper {
+       return loadGenericResource(resource) {
+           CustomResourceWrapper(
+               YamlParserFromFile().parse(
+                   resource,
+                   HashMap<String, String>()::class.java
+               )!!,
+               context
+           )
+       }
+   }
+
+    /**
+     * Parses a Deployment from a Deployment YAML file.
+     * @param resource path of the YAML file
+     * @return Deployment from fabric8
+     */
+    override fun loadDeployment(resource: String): Deployment {
+        return loadGenericResource(resource) { x: String -> client.apps().deployments().load(x).get() }
+    }
+
+    /**
+     * Parses a ConfigMap from a ConfigMap YAML file.
+     * @param resource path of the YAML file
+     * @return ConfigMap from fabric8
+     */
+    override fun loadConfigmap(resource: String): ConfigMap {
+        return loadGenericResource(resource) { x: String -> client.configMaps().load(x).get() }
+    }
+
+    /**
+     * Parses a StatefulSet from a StatefulSet YAML file.
+     * @param resource path of the YAML file
+     * @return StatefulSet from fabric8
+     */
+    override fun loadStatefulSet(resource: String): KubernetesResource {
+        return loadGenericResource(resource) { x: String -> client.apps().statefulSets().load(x).get() }
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/k8s/resourceLoader/K8sResourceLoaderFromString.kt b/theodolite/src/main/kotlin/theodolite/k8s/resourceLoader/K8sResourceLoaderFromString.kt
new file mode 100644
index 0000000000000000000000000000000000000000..e9611aaa82870dfb676820029cf42c5aab63d672
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/k8s/resourceLoader/K8sResourceLoaderFromString.kt
@@ -0,0 +1,60 @@
+package theodolite.k8s.resourceLoader
+
+import io.fabric8.kubernetes.api.model.ConfigMap
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext
+import theodolite.k8s.CustomResourceWrapper
+import theodolite.util.YamlParserFromString
+import java.io.ByteArrayInputStream
+
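+/**
+ * Loads Kubernetes resources from YAML strings by wrapping them in a [ByteArrayInputStream].
+ * Counterpart to [K8sResourceLoaderFromFile], which loads resources from file paths.
+ */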
+class K8sResourceLoaderFromString(private val client: NamespacedKubernetesClient): AbstractK8sLoader(),
+    K8sResourceLoader {
+
+    @OptIn(ExperimentalStdlibApi::class)
+    override fun loadService(resource: String): KubernetesResource {
+        return loadGenericResource(resource) { x: String ->
+            val stream = ByteArrayInputStream(x.encodeToByteArray())
+            client.services().load(stream).get() }
+    }
+
+    @OptIn(ExperimentalStdlibApi::class)
+    override fun loadDeployment(resource: String): Deployment {
+        return loadGenericResource(resource) { x: String ->
+            val stream = ByteArrayInputStream(x.encodeToByteArray())
+            client.apps().deployments().load(stream).get() }
+    }
+
+    @OptIn(ExperimentalStdlibApi::class)
+    override fun loadConfigmap(resource: String): ConfigMap {
+        return loadGenericResource(resource) { x: String ->
+            val stream = ByteArrayInputStream(x.encodeToByteArray())
+            client.configMaps().load(stream).get() }
+    }
+
+    @OptIn(ExperimentalStdlibApi::class)
+    override fun loadStatefulSet(resource: String): KubernetesResource {
+        return loadGenericResource(resource) { x: String ->
+            val stream = ByteArrayInputStream(x.encodeToByteArray())
+            client.apps().statefulSets().load(stream).get() }
+    }
+
+    /**
+     * Parses a CustomResource from a YAML string.
+     * @param resource the YAML content
+     * @param context specific CRD context for this custom resource
+     * @return CustomResourceWrapper from fabric8
+     */
+    override fun loadCustomResourceWrapper(resource: String, context: CustomResourceDefinitionContext): CustomResourceWrapper {
+        return loadGenericResource(resource) {
+            CustomResourceWrapper(
+                YamlParserFromString().parse(
+                    resource,
+                    HashMap<String, String>()::class.java
+                )!!,
+                context
+            )
+        }
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/model/crd/BenchmarkCRD.kt b/theodolite/src/main/kotlin/theodolite/model/crd/BenchmarkCRD.kt
new file mode 100644
index 0000000000000000000000000000000000000000..b6468fff523e57b124e144d5b9fef6477973655a
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/model/crd/BenchmarkCRD.kt
@@ -0,0 +1,19 @@
+package theodolite.model.crd
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.HasMetadata
+import io.fabric8.kubernetes.api.model.Namespaced
+import io.fabric8.kubernetes.client.CustomResource
+import io.fabric8.kubernetes.model.annotation.Group
+import io.fabric8.kubernetes.model.annotation.Kind
+import io.fabric8.kubernetes.model.annotation.Version
+import theodolite.benchmark.KubernetesBenchmark
+
+@JsonDeserialize
+@Version("v1")
+@Group("theodolite.com")
+@Kind("benchmark")
+class BenchmarkCRD(
+    var spec: KubernetesBenchmark = KubernetesBenchmark(),
+    var status: BenchmarkStatus = BenchmarkStatus()
+) : CustomResource<KubernetesBenchmark, BenchmarkStatus>(), Namespaced, HasMetadata
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/model/crd/BenchmarkExecutionList.kt b/theodolite/src/main/kotlin/theodolite/model/crd/BenchmarkExecutionList.kt
new file mode 100644
index 0000000000000000000000000000000000000000..2b2dcc07f9c37f1712109e3d092f2db0c139e1c8
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/model/crd/BenchmarkExecutionList.kt
@@ -0,0 +1,5 @@
+package theodolite.model.crd
+
+import io.fabric8.kubernetes.client.CustomResourceList
+
+class BenchmarkExecutionList : CustomResourceList<ExecutionCRD>()
diff --git a/theodolite/src/main/kotlin/theodolite/model/crd/BenchmarkStates.kt b/theodolite/src/main/kotlin/theodolite/model/crd/BenchmarkStates.kt
new file mode 100644
index 0000000000000000000000000000000000000000..f52f2c168765ebb8bcc4f390795aa470b968021b
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/model/crd/BenchmarkStates.kt
@@ -0,0 +1,6 @@
+package theodolite.model.crd
+
+enum class BenchmarkStates(val value: String) {
+    PENDING("Pending"),
+    READY("Ready")
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/model/crd/BenchmarkStatus.kt b/theodolite/src/main/kotlin/theodolite/model/crd/BenchmarkStatus.kt
new file mode 100644
index 0000000000000000000000000000000000000000..f51cb7a76d015d6ecd900279e68d41baa26e876a
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/model/crd/BenchmarkStatus.kt
@@ -0,0 +1,11 @@
+package theodolite.model.crd
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Namespaced
+
+@JsonDeserialize
+class BenchmarkStatus : KubernetesResource, Namespaced {
+    var resourceSetsState = "-"
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/model/crd/ExecutionCRD.kt b/theodolite/src/main/kotlin/theodolite/model/crd/ExecutionCRD.kt
new file mode 100644
index 0000000000000000000000000000000000000000..659621e8c3b1d5308a10d81240575dd3d432b53f
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/model/crd/ExecutionCRD.kt
@@ -0,0 +1,18 @@
+package theodolite.model.crd
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.Namespaced
+import io.fabric8.kubernetes.client.CustomResource
+import io.fabric8.kubernetes.model.annotation.Group
+import io.fabric8.kubernetes.model.annotation.Kind
+import io.fabric8.kubernetes.model.annotation.Version
+import theodolite.benchmark.BenchmarkExecution
+
+@JsonDeserialize
+@Version("v1")
+@Group("theodolite.com")
+@Kind("execution")
+class ExecutionCRD(
+    var spec: BenchmarkExecution = BenchmarkExecution(),
+    var status: ExecutionStatus = ExecutionStatus()
+) : CustomResource<BenchmarkExecution, ExecutionStatus>(), Namespaced
diff --git a/theodolite/src/main/kotlin/theodolite/model/crd/ExecutionStates.kt b/theodolite/src/main/kotlin/theodolite/model/crd/ExecutionStates.kt
new file mode 100644
index 0000000000000000000000000000000000000000..ad68bf380b18af1a654c201817bb7fc982804c8b
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/model/crd/ExecutionStates.kt
@@ -0,0 +1,12 @@
+package theodolite.model.crd
+
+enum class ExecutionStates(val value: String) {
+    // Execution states
+    RUNNING("Running"),
+    PENDING("Pending"),
+    FAILURE("Failure"),
+    FINISHED("Finished"),
+    RESTART("Restart"),
+    INTERRUPTED("Interrupted"),
+    NO_STATE("NoState"),
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/model/crd/ExecutionStatus.kt b/theodolite/src/main/kotlin/theodolite/model/crd/ExecutionStatus.kt
new file mode 100644
index 0000000000000000000000000000000000000000..252738959762aa5d0732babc5589c698d7bd4e9f
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/model/crd/ExecutionStatus.kt
@@ -0,0 +1,11 @@
+package theodolite.model.crd
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Namespaced
+
+@JsonDeserialize
+class ExecutionStatus : KubernetesResource, Namespaced {
+    var executionState: String = ""
+    var executionDuration: String = "-"
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/model/crd/KubernetesBenchmarkList.kt b/theodolite/src/main/kotlin/theodolite/model/crd/KubernetesBenchmarkList.kt
new file mode 100644
index 0000000000000000000000000000000000000000..8ad0a493d948bf5f78741052100766dcf6e316ec
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/model/crd/KubernetesBenchmarkList.kt
@@ -0,0 +1,5 @@
+package theodolite.model.crd
+
+import io.fabric8.kubernetes.client.CustomResourceList
+
+class KubernetesBenchmarkList : CustomResourceList<BenchmarkCRD>()
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/AbstractPatcher.kt b/theodolite/src/main/kotlin/theodolite/patcher/AbstractPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..df80e9cbd2503685a7dbed35db5319920dfc42cb
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/AbstractPatcher.kt
@@ -0,0 +1,24 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+
+/**
+ * A Patcher is able to modify values of a Kubernetes resource, see [Patcher].
+ *
+ * Concrete patchers are created with up to three parameters.
+ *
+ * @param k8sResource The Kubernetes resource to be patched.
+ * @param container *(optional)* The name of the container to be patched
+ * @param variableName *(optional)* The name of the variable to be patched
+ *
+ * **For example**, to patch the load dimension of a load generator, a patcher would be created as follows:
+ *
+ * k8sResource: `uc-1-workload-generator.yaml`
+ * container: `workload`
+ * variableName: `NUM_SENSORS`
+ *
+ */
+abstract class AbstractPatcher(
+    k8sResource: KubernetesResource
+) : Patcher
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/ConfigOverrideModifier.kt b/theodolite/src/main/kotlin/theodolite/patcher/ConfigOverrideModifier.kt
new file mode 100644
index 0000000000000000000000000000000000000000..8f77b1b95f3bf5cc9422cda55cb261048cebaeb6
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/ConfigOverrideModifier.kt
@@ -0,0 +1,39 @@
+package theodolite.patcher
+
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.util.ConfigurationOverride
+import theodolite.util.PatcherDefinition
+
+/**
+ * The ConfigOverrideModifier makes it possible to update the configuration overrides of an execution.
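+ *
+ * For example, calling [setAdditionalLabels] with label name `deployed-for-execution` and the
+ * execution's name as value appends one `LabelPatcher` override per resource in [resources].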
+ *
+ * @property execution execution for which the config overrides should be updated
+ * @property resources list of all resources that should be updated.
+ */
+class ConfigOverrideModifier(val execution: BenchmarkExecution, val resources: List<String>) {
+
+    /**
+     * Adds a [LabelPatcher] to the configOverrides.
+     *
+     * @param labelValue value argument for the label patcher
+     * @param labelName  label name argument for the label patcher
+     */
+    fun setAdditionalLabels(
+        labelValue: String,
+        labelName: String
+    ) {
+        val additionalConfigOverrides = mutableListOf<ConfigurationOverride>()
+        resources.forEach {
+            val configurationOverride = ConfigurationOverride()
+            configurationOverride.patcher = PatcherDefinition()
+            configurationOverride.patcher.type = "LabelPatcher"
+            configurationOverride.patcher.properties = mutableMapOf("variableName" to labelName)
+            configurationOverride.patcher.resource = it
+            configurationOverride.value = labelValue
+            additionalConfigOverrides.add(configurationOverride)
+        }
+        execution.configOverrides.addAll(additionalConfigOverrides)
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/DataVolumeLoadGeneratorReplicaPatcher.kt b/theodolite/src/main/kotlin/theodolite/patcher/DataVolumeLoadGeneratorReplicaPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..bdc107910edc8ddfb41e7757c775977086a25a26
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/DataVolumeLoadGeneratorReplicaPatcher.kt
@@ -0,0 +1,38 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+
+/**
+ * The DataVolumeLoadGeneratorReplicaPatcher takes the total load that should be generated
+ * and computes the number of instances needed for this load based on the `maxVolume`
+ * ((load + maxVolume - 1) / maxVolume) and calculates the load per instance
+ * (loadPerInstance = load / instances).
+ * The number of instances is set for the load generator and the given variable is set to the
+ * load per instance.
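+ *
+ * For example, load = 10000 and maxVolume = 4000 yields (10000 + 4000 - 1) / 4000 = 3
+ * instances, each generating a load of 10000 / 3 = 3333.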
+ *
+ * @property k8sResource Kubernetes resource to be patched.
+ * @property maxVolume per load generator instance
+ * @property container Container to be patched.
+ * @property variableName Name of the environment variable to be patched.
+ */
+class DataVolumeLoadGeneratorReplicaPatcher(
+    k8sResource: KubernetesResource,
+    private val maxVolume: Int,
+    container: String,
+    variableName: String
+) : AbstractPatcher(k8sResource) {
+
+    private val replicaPatcher = ReplicaPatcher(k8sResource)
+    private val envVarPatcher = EnvVarPatcher(k8sResource, container, variableName)
+
+    override fun <T> patch(value: T) {
+        // calculate number of load generator instances and load per instance
+        val load = Integer.parseInt(value.toString())
+        val loadGenInstances = (load + maxVolume - 1) / maxVolume
+        val loadPerInstance = load / loadGenInstances
+
+        // Patch instance values and load value of generators
+        replicaPatcher.patch(loadGenInstances.toString())
+        envVarPatcher.patch(loadPerInstance.toString())
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/EnvVarPatcher.kt b/theodolite/src/main/kotlin/theodolite/patcher/EnvVarPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..416aec74a3af9b74594f5e6cd018682bf91cbf63
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/EnvVarPatcher.kt
@@ -0,0 +1,60 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.Container
+import io.fabric8.kubernetes.api.model.EnvVar
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+
+/**
+ * The EnvVarPatcher allows modifying the value of an environment variable of a deployment's container.
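+ *
+ * For example, `EnvVarPatcher(deployment, "workload", "NUM_SENSORS").patch("25000")` sets
+ * the environment variable `NUM_SENSORS` of the `workload` container to `25000`.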
+ *
+ * @property k8sResource Kubernetes resource to be patched.
+ * @property container Container to be patched.
+ * @property variableName Name of the environment variable to be patched.
+ */
+class EnvVarPatcher(
+    private val k8sResource: KubernetesResource,
+    private val container: String,
+    private val variableName: String
+) : AbstractPatcher(k8sResource) {
+
+    override fun <String> patch(value: String) {
+        if (k8sResource is Deployment) {
+            this.setEnv(
+                k8sResource, this.container,
+                mapOf(this.variableName to value) as Map<kotlin.String, kotlin.String>
+            )
+        }
+    }
+
+    /**
+     * Sets the container's environment variables; creates a new variable if it does not exist yet.
+     * @param container the container to modify
+     * @param map map from environment variable names to their values
+     */
+    private fun setContainerEnv(container: Container, map: Map<String, String>) {
+        map.forEach { (k, v) ->
+            // find existing variables with a matching name
+            val matchingVars = container.env.filter { envVar -> envVar.name == k }
+
+            if (matchingVars.isEmpty()) {
+                val newVar = EnvVar()
+                newVar.name = k
+                newVar.value = v
+                container.env.add(newVar)
+            } else {
+                matchingVars.forEach {
+                    it.value = v
+                }
+            }
+        }
+    }
+
+    /**
+     * Set the environment Variable for a container
+     */
+    private fun setEnv(workloadDeployment: Deployment, containerName: String, map: Map<String, String>) {
+        workloadDeployment.spec.template.spec.containers.filter { it.name == containerName }
+            .forEach { setContainerEnv(it, map) }
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/ImagePatcher.kt b/theodolite/src/main/kotlin/theodolite/patcher/ImagePatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..8f6753372076c119324dc962112928253633b6b0
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/ImagePatcher.kt
@@ -0,0 +1,27 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+
+/**
+ * The ImagePatcher allows changing the image of a container.
+ *
+ * @param k8sResource Kubernetes resource to be patched.
+ * @param container Container to be patched.
+ */
+class ImagePatcher(private val k8sResource: KubernetesResource, private val container: String) :
+    AbstractPatcher(k8sResource) {
+
+    override fun <String> patch(imagePath: String) {
+        if (k8sResource is Deployment) {
+            k8sResource.spec.template.spec.containers.filter { it.name == container }.forEach {
+                it.image = imagePath as kotlin.String
+            }
+        } else if (k8sResource is StatefulSet) {
+            k8sResource.spec.template.spec.containers.filter { it.name == container }.forEach {
+                it.image = imagePath as kotlin.String
+            }
+        }
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/LabelPatcher.kt b/theodolite/src/main/kotlin/theodolite/patcher/LabelPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..2f8c703afa9e826a79f0785abef493d2d448ac74
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/LabelPatcher.kt
@@ -0,0 +1,49 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.ConfigMap
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Service
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+import io.fabric8.kubernetes.client.CustomResource
+
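+/**
+ * Sets a label on a Kubernetes resource. The branches below are intentionally repeated:
+ * [KubernetesResource] itself does not expose metadata, so each supported type must be
+ * smart-cast separately before its labels can be accessed.
+ */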
+/**
+ * The Label patcher sets or overwrites a label on the given Kubernetes resource.
+ *
+ * @param k8sResource Kubernetes resource to be patched.
+ * @param variableName The key of the label to be patched.
+ */
+class LabelPatcher(private val k8sResource: KubernetesResource, val variableName: String) :
+    AbstractPatcher(k8sResource) {
+
+    override fun <String> patch(labelValue: String) {
+        if (labelValue is kotlin.String) {
+            when (k8sResource) {
+                // all supported resource kinds expose their labels via HasMetadata
+                is Deployment, is StatefulSet, is Service, is ConfigMap, is CustomResource<*, *> -> {
+                    val metadata = (k8sResource as HasMetadata).metadata
+                    if (metadata.labels == null) {
+                        metadata.labels = mutableMapOf()
+                    }
+                    metadata.labels[this.variableName] = labelValue
+                }
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/NodeSelectorPatcher.kt b/theodolite/src/main/kotlin/theodolite/patcher/NodeSelectorPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..0e8cd553a6c6a9ed6fa2c8cc1b84e4cfebe79d73
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/NodeSelectorPatcher.kt
@@ -0,0 +1,19 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+
+/**
+ * The Node selector patcher makes it possible to set the nodeSelector of a Kubernetes Deployment.
+ *
+ * @param k8sResource Kubernetes resource to be patched.
+ * @param variableName The `label-key` of the node for which the `label-value` is to be patched.
+ */
+class NodeSelectorPatcher(private val k8sResource: KubernetesResource, private val variableName: String) :
+    AbstractPatcher(k8sResource) {
+    override fun <String> patch(value: String) {
+        if (k8sResource is Deployment) {
+            k8sResource.spec.template.spec.nodeSelector = mapOf(variableName to value as kotlin.String)
+        }
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/NumNestedGroupsLoadGeneratorReplicaPatcher.kt b/theodolite/src/main/kotlin/theodolite/patcher/NumNestedGroupsLoadGeneratorReplicaPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..c617917e6894c3a30779dd4257a96365ded35481
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/NumNestedGroupsLoadGeneratorReplicaPatcher.kt
@@ -0,0 +1,23 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import kotlin.math.pow
+
+/**
+ * The NumNestedGroups load generator replica patcher determines the number of load generator
+ * replicas for nested-group workloads: the number of sensors is approximated as
+ * numSensors^value and divided by loadGenMaxRecords, rounded up.
+ *
+ * @param k8sResource Kubernetes resource to be patched.
+ * @param numSensors the number of sensors per group.
+ * @param loadGenMaxRecords the maximum number of records a single load generator instance can handle.
+ */
+class NumNestedGroupsLoadGeneratorReplicaPatcher(
+    private val k8sResource: KubernetesResource,
+    private val numSensors: String,
+    private val loadGenMaxRecords: String
+) :
+    AbstractPatcher(k8sResource) {
+    override fun <String> patch(value: String) {
+        if (k8sResource is Deployment) {
+            if (value is kotlin.String) {
+                val approxNumSensors = numSensors.toDouble().pow(Integer.parseInt(value).toDouble())
+                val loadGenInstances =
+                    (approxNumSensors + loadGenMaxRecords.toDouble() - 1) / loadGenMaxRecords.toDouble()
+                this.k8sResource.spec.replicas = loadGenInstances.toInt()
+            }
+        }
+    }
+}
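+
+// Worked example (hypothetical values, for illustration only): with numSensors = "4",
+// loadGenMaxRecords = "50" and patch("3"), approxNumSensors = 4.0^3 = 64.0 and
+// loadGenInstances = (64.0 + 50.0 - 1) / 50.0 = 2.26, so spec.replicas is set to 2,
+// i.e. the ceiling of 64 sensors divided by 50 records per load generator instance.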
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/NumSensorsLoadGeneratorReplicaPatcher.kt b/theodolite/src/main/kotlin/theodolite/patcher/NumSensorsLoadGeneratorReplicaPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..86bb37db3cb9fd0d3bca1690d5eb4e622329a9bc
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/NumSensorsLoadGeneratorReplicaPatcher.kt
@@ -0,0 +1,21 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+
+/**
+ * The NumSensors load generator replica patcher sets the number of load generator replicas
+ * to the number of sensors divided by loadGenMaxRecords, rounded up.
+ *
+ * @param k8sResource Kubernetes resource to be patched.
+ * @param loadGenMaxRecords the maximum number of records a single load generator instance can handle.
+ */
+class NumSensorsLoadGeneratorReplicaPatcher(
+    private val k8sResource: KubernetesResource,
+    private val loadGenMaxRecords: String
+) :
+    AbstractPatcher(k8sResource) {
+    override fun <String> patch(value: String) {
+        if (k8sResource is Deployment) {
+            if (value is kotlin.String) {
+                val loadGenInstances =
+                    (Integer.parseInt(value) + loadGenMaxRecords.toInt() - 1) / loadGenMaxRecords.toInt()
+                this.k8sResource.spec.replicas = loadGenInstances
+            }
+        }
+    }
+}
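+
+// Worked example (hypothetical values, for illustration only): with
+// loadGenMaxRecords = "15000" and patch("50000"), loadGenInstances =
+// (50000 + 15000 - 1) / 15000 = 4 (integer division), i.e. the ceiling of
+// 50000 sensors divided by 15000 records per load generator instance.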
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/Patcher.kt b/theodolite/src/main/kotlin/theodolite/patcher/Patcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..84b886cb4f06b3e667eb8b8aeaa622e1ee54852e
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/Patcher.kt
@@ -0,0 +1,20 @@
+package theodolite.patcher
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+/**
+ * A patcher can be used to modify a value in the definition of a Kubernetes resource.
+ *
+ * Note: Implementations currently declare a type parameter named `String` that shadows
+ * `kotlin.String`, which is why they cast their argument via `as kotlin.String`.
+ */
+@RegisterForReflection
+interface Patcher {
+    /**
+     * The patch method modifies a value in the definition of a
+     * Kubernetes resource.
+     *
+     * @param T The type of value
+     * @param value The value to be used.
+     */
+    fun <T> patch(value: T)
+}
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/PatcherDefinitionFactory.kt b/theodolite/src/main/kotlin/theodolite/patcher/PatcherDefinitionFactory.kt
new file mode 100644
index 0000000000000000000000000000000000000000..6a1f993e2ac327ec242a8a5bafc3e6cc43475710
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/PatcherDefinitionFactory.kt
@@ -0,0 +1,27 @@
+package theodolite.patcher
+
+import theodolite.util.PatcherDefinition
+import theodolite.util.TypeName
+
+/**
+ * The PatcherDefinitionFactory creates [PatcherDefinition]s.
+ *
+ * @constructor Create empty Patcher definition factory.
+ */
+class PatcherDefinitionFactory {
+    /**
+     * Creates a list of PatcherDefinitions
+     *
+     * @param requiredType indicates the required PatcherDefinitions
+     *     (for example `NumSensors`)
+     * @param patcherTypes list of TypeNames. A TypeName contains a type
+     *     (for example `NumSensors`) and a list of
+     *     PatcherDefinitions, which are related to this type.
+     * @return A list of [PatcherDefinition]s which correspond to the requiredType.
+     * @throws IllegalArgumentException if no [TypeName] with the requiredType exists.
+     */
+    fun createPatcherDefinition(requiredType: String, patcherTypes: List<TypeName>): List<PatcherDefinition> {
+        return patcherTypes.firstOrNull { type -> type.typeName == requiredType }
+            ?.patchers ?: throw IllegalArgumentException("typeName $requiredType not found.")
+    }
+}
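+
+// Usage sketch (assumption: `loadTypes` was deserialized from a benchmark definition
+// elsewhere; the type name below is only an example):
+//
+//   val definitions: List<PatcherDefinition> =
+//       PatcherDefinitionFactory().createPatcherDefinition("NumSensors", loadTypes)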
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/PatcherFactory.kt b/theodolite/src/main/kotlin/theodolite/patcher/PatcherFactory.kt
new file mode 100644
index 0000000000000000000000000000000000000000..ebad5de74a6b819dbf7887dfad91faac37ed5074
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/PatcherFactory.kt
@@ -0,0 +1,96 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import theodolite.util.InvalidPatcherConfigurationException
+import theodolite.util.PatcherDefinition
+
+/**
+ * The Patcher factory creates [Patcher]s
+ *
+ * @constructor Creates an empty PatcherFactory.
+ */
+class PatcherFactory {
+    /**
+     * Create patcher based on the given [PatcherDefinition] and
+     * the list of KubernetesResources.
+     *
+     * @param patcherDefinition The [PatcherDefinition] for which a
+     *     [Patcher] should be created.
+     * @param k8sResources Collection of all available Kubernetes resources
+     *     as pairs of String and KubernetesResource:
+     *     The first component is the filename in which the resource is defined.
+     *     The second component is the concrete [KubernetesResource] to be patched.
+     * @return The created [Patcher].
+     * @throws InvalidPatcherConfigurationException if no patcher can be created.
+     */
+    fun createPatcher(
+        patcherDefinition: PatcherDefinition,
+        k8sResources: Collection<Pair<String, KubernetesResource>>
+    ): Patcher {
+        val resource =
+            k8sResources.filter { it.first == patcherDefinition.resource }
+                .map { resource -> resource.second }
+                .firstOrNull()
+                ?: throw InvalidPatcherConfigurationException("Could not find resource ${patcherDefinition.resource}")
+
+        return try {
+            when (patcherDefinition.type) {
+                "ReplicaPatcher" -> ReplicaPatcher(
+                    k8sResource = resource
+                )
+                "NumNestedGroupsLoadGeneratorReplicaPatcher" -> NumNestedGroupsLoadGeneratorReplicaPatcher(
+                    k8sResource = resource,
+                    loadGenMaxRecords = patcherDefinition.properties["loadGenMaxRecords"]!!,
+                    numSensors = patcherDefinition.properties["numSensors"]!!
+                )
+                "NumSensorsLoadGeneratorReplicaPatcher" -> NumSensorsLoadGeneratorReplicaPatcher(
+                    k8sResource = resource,
+                    loadGenMaxRecords = patcherDefinition.properties["loadGenMaxRecords"]!!
+                )
+                "DataVolumeLoadGeneratorReplicaPatcher" -> DataVolumeLoadGeneratorReplicaPatcher(
+                    k8sResource = resource,
+                    maxVolume = patcherDefinition.properties["maxVolume"]!!.toInt(),
+                    container = patcherDefinition.properties["container"]!!,
+                    variableName = patcherDefinition.properties["variableName"]!!
+                )
+                "EnvVarPatcher" -> EnvVarPatcher(
+                    k8sResource = resource,
+                    container = patcherDefinition.properties["container"]!!,
+                    variableName = patcherDefinition.properties["variableName"]!!
+                )
+                "NodeSelectorPatcher" -> NodeSelectorPatcher(
+                    k8sResource = resource,
+                    variableName = patcherDefinition.properties["variableName"]!!
+                )
+                "ResourceLimitPatcher" -> ResourceLimitPatcher(
+                    k8sResource = resource,
+                    container = patcherDefinition.properties["container"]!!,
+                    limitedResource = patcherDefinition.properties["limitedResource"]!!
+                )
+                "ResourceRequestPatcher" -> ResourceRequestPatcher(
+                    k8sResource = resource,
+                    container = patcherDefinition.properties["container"]!!,
+                    requestedResource = patcherDefinition.properties["requestedResource"]!!
+                )
+                "SchedulerNamePatcher" -> SchedulerNamePatcher(
+                    k8sResource = resource
+                )
+                "LabelPatcher" -> LabelPatcher(
+                    k8sResource = resource,
+                    variableName = patcherDefinition.properties["variableName"]!!
+                )
+                "ImagePatcher" -> ImagePatcher(
+                    k8sResource = resource,
+                    container = patcherDefinition.properties["container"]!!
+                )
+                else -> throw InvalidPatcherConfigurationException("Patcher type ${patcherDefinition.type} not found.")
+            }
+        } catch (e: NullPointerException) {
+            throw InvalidPatcherConfigurationException(
+                "Could not create patcher with type ${patcherDefinition.type}" +
+                        " Probably a required patcher argument was not specified.", e
+            )
+        }
+    }
+}
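+
+// Usage sketch (assumption: `definition` is a deserialized PatcherDefinition with
+// type "ReplicaPatcher" and `deployment` is a parsed Deployment; the filename
+// below is hypothetical):
+//
+//   val patcher = PatcherFactory().createPatcher(
+//       patcherDefinition = definition,
+//       k8sResources = listOf("uc1-kstreams-deployment.yaml" to deployment)
+//   )
+//   patcher.patch("3")  // scales the Deployment to three replicas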
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/ReplicaPatcher.kt b/theodolite/src/main/kotlin/theodolite/patcher/ReplicaPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..4cc35f2ed74f9e366c266c3f98f1b3d36d4ba1b8
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/ReplicaPatcher.kt
@@ -0,0 +1,19 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+
+/**
+ * The Replica [Patcher] modifies the number of replicas for the given Kubernetes deployment.
+ *
+ * @param k8sResource  Kubernetes resource to be patched.
+ */
+class ReplicaPatcher(private val k8sResource: KubernetesResource) : AbstractPatcher(k8sResource) {
+    override fun <String> patch(value: String) {
+        if (k8sResource is Deployment) {
+            if (value is kotlin.String) {
+                this.k8sResource.spec.replicas = Integer.parseInt(value)
+            }
+        }
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/ResourceLimitPatcher.kt b/theodolite/src/main/kotlin/theodolite/patcher/ResourceLimitPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..9dcdffa0407dd4fdaf2d9b0a898bcdf6cebe5a8b
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/ResourceLimitPatcher.kt
@@ -0,0 +1,60 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.Container
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Quantity
+import io.fabric8.kubernetes.api.model.ResourceRequirements
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+import theodolite.util.InvalidPatcherConfigurationException
+
+/**
+ * The Resource limit [Patcher] sets resource limits for Deployments and StatefulSets.
+ *
+ * @param k8sResource Kubernetes resource to be patched.
+ * @param container Container to be patched.
+ * @param limitedResource The resource to be limited (e.g. **cpu or memory**)
+ */
+class ResourceLimitPatcher(
+    private val k8sResource: KubernetesResource,
+    private val container: String,
+    private val limitedResource: String
+) : AbstractPatcher(k8sResource) {
+
+    override fun <String> patch(value: String) {
+        when (k8sResource) {
+            is Deployment -> {
+                k8sResource.spec.template.spec.containers.filter { it.name == container }.forEach {
+                    setLimits(it, value as kotlin.String)
+                }
+            }
+            is StatefulSet -> {
+                k8sResource.spec.template.spec.containers.filter { it.name == container }.forEach {
+                    setLimits(it, value as kotlin.String)
+                }
+            }
+            else -> {
+                throw InvalidPatcherConfigurationException("ResourceLimitPatcher not applicable for $k8sResource")
+            }
+        }
+    }
+
+    private fun setLimits(container: Container, value: String) {
+        when {
+            container.resources == null -> {
+                val resource = ResourceRequirements()
+                resource.limits = mapOf(limitedResource to Quantity(value))
+                container.resources = resource
+            }
+            // limits is a platform type and may be null; treat null like empty
+            container.resources.limits.isNullOrEmpty() -> {
+                container.resources.limits = mapOf(limitedResource to Quantity(value))
+            }
+            else -> {
+                val values = mutableMapOf<String, Quantity>()
+                container.resources.limits.forEach { entry -> values[entry.key] = entry.value }
+                values[limitedResource] = Quantity(value)
+                container.resources.limits = values
+            }
+        }
+    }
+}
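+
+// Usage sketch (assumption: `deployment` is a parsed Deployment with a container
+// named "uc-application"; names and values are illustrative). The ResourceRequestPatcher
+// below is used analogously for resource requests:
+//
+//   val patcher = ResourceLimitPatcher(deployment, container = "uc-application", limitedResource = "cpu")
+//   patcher.patch("1000m")  // sets resources.limits["cpu"] = 1000m on that container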
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/ResourceRequestPatcher.kt b/theodolite/src/main/kotlin/theodolite/patcher/ResourceRequestPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..24cdde40f7f78bd67d115b2dc44f47e180f51ee2
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/ResourceRequestPatcher.kt
@@ -0,0 +1,60 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.Container
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Quantity
+import io.fabric8.kubernetes.api.model.ResourceRequirements
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+import theodolite.util.InvalidPatcherConfigurationException
+
+/**
+ * The Resource request [Patcher] sets resource requests for Deployments and StatefulSets.
+ *
+ * @param k8sResource Kubernetes resource to be patched.
+ * @param container Container to be patched.
+ * @param requestedResource The resource to be requested (e.g. **cpu or memory**)
+ */
+class ResourceRequestPatcher(
+    private val k8sResource: KubernetesResource,
+    private val container: String,
+    private val requestedResource: String
+) : AbstractPatcher(k8sResource) {
+
+    override fun <String> patch(value: String) {
+        when (k8sResource) {
+            is Deployment -> {
+                k8sResource.spec.template.spec.containers.filter { it.name == container }.forEach {
+                    setRequests(it, value as kotlin.String)
+                }
+            }
+            is StatefulSet -> {
+                k8sResource.spec.template.spec.containers.filter { it.name == container }.forEach {
+                    setRequests(it, value as kotlin.String)
+                }
+            }
+            else -> {
+                throw InvalidPatcherConfigurationException("ResourceRequestPatcher not applicable for $k8sResource")
+            }
+        }
+    }
+
+    private fun setRequests(container: Container, value: String) {
+        when {
+            container.resources == null -> {
+                val resource = ResourceRequirements()
+                resource.requests = mapOf(requestedResource to Quantity(value))
+                container.resources = resource
+            }
+            // requests is a platform type and may be null; treat null like empty
+            container.resources.requests.isNullOrEmpty() -> {
+                container.resources.requests = mapOf(requestedResource to Quantity(value))
+            }
+            else -> {
+                val values = mutableMapOf<String, Quantity>()
+                container.resources.requests.forEach { entry -> values[entry.key] = entry.value }
+                values[requestedResource] = Quantity(value)
+                container.resources.requests = values
+            }
+        }
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/patcher/SchedulerNamePatcher.kt b/theodolite/src/main/kotlin/theodolite/patcher/SchedulerNamePatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..348f0c50090a34c91221d3e099c3532375a578da
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/patcher/SchedulerNamePatcher.kt
@@ -0,0 +1,17 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+
+/**
+ * The Scheduler name [Patcher] makes it possible to set the scheduler used to
+ * deploy the given Deployment.
+ * @param k8sResource Kubernetes resource to be patched.
+ */
+class SchedulerNamePatcher(private val k8sResource: KubernetesResource) : Patcher {
+    override fun <String> patch(value: String) {
+        if (k8sResource is Deployment) {
+            k8sResource.spec.template.spec.schedulerName = value as kotlin.String
+        }
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/strategies/StrategyFactory.kt b/theodolite/src/main/kotlin/theodolite/strategies/StrategyFactory.kt
new file mode 100644
index 0000000000000000000000000000000000000000..829370e8ce1c181c1a4cb9fdd8ccf0ecefd48d3d
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/strategies/StrategyFactory.kt
@@ -0,0 +1,54 @@
+package theodolite.strategies
+
+import theodolite.execution.BenchmarkExecutor
+import theodolite.strategies.restriction.LowerBoundRestriction
+import theodolite.strategies.restriction.RestrictionStrategy
+import theodolite.strategies.searchstrategy.BinarySearch
+import theodolite.strategies.searchstrategy.FullSearch
+import theodolite.strategies.searchstrategy.LinearSearch
+import theodolite.strategies.searchstrategy.SearchStrategy
+import theodolite.util.Results
+
+/**
+ * Factory for creating [SearchStrategy] and [RestrictionStrategy] strategies.
+ */
+class StrategyFactory {
+
+    /**
+     * Create a [SearchStrategy].
+     *
+     * @param executor The [theodolite.execution.BenchmarkExecutor] that executes individual experiments.
+     * @param searchStrategyString Specifies the [SearchStrategy]. Must be one of the strings
+     * 'FullSearch', 'LinearSearch', or 'BinarySearch'.
+     *
+     * @throws IllegalArgumentException if the [SearchStrategy] was not one of the allowed options.
+     */
+    fun createSearchStrategy(executor: BenchmarkExecutor, searchStrategyString: String): SearchStrategy {
+        return when (searchStrategyString) {
+            "FullSearch" -> FullSearch(executor)
+            "LinearSearch" -> LinearSearch(executor)
+            "BinarySearch" -> BinarySearch(executor)
+            else -> throw IllegalArgumentException("Search Strategy $searchStrategyString not found")
+        }
+    }
+
+    /**
+     * Create a [RestrictionStrategy].
+     *
+     * @param results The [Results] saves the state of the Theodolite benchmark run.
+     * @param restrictionStrings Specifies the list of [RestrictionStrategy] that are used to restrict the number
+     * of [theodolite.util.Resource]s for a fixed LoadDimension. Currently, each entry must equal the string
+     * 'LowerBound'.
+     *
+     * @throws IllegalArgumentException if one of the restrictionStrings is not an allowed option.
+     */
+    fun createRestrictionStrategy(results: Results, restrictionStrings: List<String>): Set<RestrictionStrategy> {
+        return restrictionStrings
+            .map { restriction ->
+                when (restriction) {
+                    "LowerBound" -> LowerBoundRestriction(results)
+                    else -> throw IllegalArgumentException("Restriction Strategy $restriction not found")
+                }
+            }.toSet()
+    }
+}
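+
+// Usage sketch (assumption: `executor` and `results` are created by the execution
+// runner; the strings must match the names checked above):
+//
+//   val factory = StrategyFactory()
+//   val searchStrategy = factory.createSearchStrategy(executor, "LinearSearch")
+//   val restrictions = factory.createRestrictionStrategy(results, listOf("LowerBound"))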
diff --git a/theodolite/src/main/kotlin/theodolite/strategies/restriction/LowerBoundRestriction.kt b/theodolite/src/main/kotlin/theodolite/strategies/restriction/LowerBoundRestriction.kt
new file mode 100644
index 0000000000000000000000000000000000000000..13bfedfe055f2bd428137f89b2986f3967ec797c
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/strategies/restriction/LowerBoundRestriction.kt
@@ -0,0 +1,24 @@
+package theodolite.strategies.restriction
+
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import theodolite.util.Results
+
+/**
+ * The [LowerBoundRestriction] sets the lower bound of the resources to be examined to the value
+ * needed to successfully execute the next smaller load.
+ *
+ * @param results [Results] object used as a basis to restrict the resources.
+ */
+class LowerBoundRestriction(results: Results) : RestrictionStrategy(results) {
+
+    override fun apply(load: LoadDimension, resources: List<Resource>): List<Resource> {
+        val maxLoad: LoadDimension? = this.results.getMaxBenchmarkedLoad(load)
+        val lowerBound: Resource = this.results.getMinRequiredInstances(maxLoad) ?: resources[0]
+        return resources.filter { x -> x.get() >= lowerBound.get() }
+    }
+}
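+
+// Worked example (hypothetical results): if load 10000 required at least 3 instances,
+// then for load 20000 getMaxBenchmarkedLoad returns 10000, getMinRequiredInstances
+// returns the 3-instance Resource, and apply() drops all resources below 3 instances.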
diff --git a/theodolite/src/main/kotlin/theodolite/strategies/restriction/RestrictionStrategy.kt b/theodolite/src/main/kotlin/theodolite/strategies/restriction/RestrictionStrategy.kt
new file mode 100644
index 0000000000000000000000000000000000000000..1ab7302d7898daad729b1c94c32d97138b5cdcf4
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/strategies/restriction/RestrictionStrategy.kt
@@ -0,0 +1,25 @@
+package theodolite.strategies.restriction
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import theodolite.util.Results
+
+/**
+ * A 'Restriction Strategy' restricts a list of resources based on the current
+ * results of all previously performed benchmarks.
+ *
+ * @param results the [Results] object
+ */
+@RegisterForReflection
+abstract class RestrictionStrategy(val results: Results) {
+    /**
+     * Apply the restriction of the given resource list for the given load based on the results object.
+     *
+     * @param load [LoadDimension] for which a subset of resources are required.
+     * @param resources List of [Resource]s to be restricted.
+     * @return Returns a list containing only elements that have not been filtered out by the
+     * restriction (possibly empty).
+     */
+    abstract fun apply(load: LoadDimension, resources: List<Resource>): List<Resource>
+}
diff --git a/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/BinarySearch.kt b/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/BinarySearch.kt
new file mode 100644
index 0000000000000000000000000000000000000000..28e8194c699cd074026c8cb7e6f3ce4ec347023b
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/BinarySearch.kt
@@ -0,0 +1,61 @@
+package theodolite.strategies.searchstrategy
+
+import mu.KotlinLogging
+import theodolite.execution.BenchmarkExecutor
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ *  Binary-search-like implementation for determining the smallest suitable number of instances.
+ *
+ * @param benchmarkExecutor Benchmark executor which runs the individual benchmarks.
+ */
+class BinarySearch(benchmarkExecutor: BenchmarkExecutor) : SearchStrategy(benchmarkExecutor) {
+    override fun findSuitableResource(load: LoadDimension, resources: List<Resource>): Resource? {
+        val result = binarySearch(load, resources, 0, resources.size - 1)
+        if (result == -1) {
+            return null
+        }
+        return resources[result]
+    }
+
+    /**
+     * Apply binary search.
+     *
+     * @param load the load dimension to perform experiments for
+     * @param resources the list in which binary search is performed
+     * @param lower lower bound for binary search (inclusive)
+     * @param upper upper bound for binary search (inclusive)
+     */
+    private fun binarySearch(load: LoadDimension, resources: List<Resource>, lower: Int, upper: Int): Int {
+        if (lower > upper) {
+            throw IllegalArgumentException()
+        }
+        // base case: the search space contains exactly one element
+        if (lower == upper) {
+            val res = resources[lower]
+            logger.info { "Running experiment with load '${load.get()}' and resources '${res.get()}'" }
+            if (this.benchmarkExecutor.runExperiment(load, resources[lower])) return lower
+            else {
+                // if this single resource amount fails, return the next larger one
+                // untested (assuming monotonic results), or -1 if none exists
+                if (lower + 1 == resources.size) return -1
+                return lower + 1
+            }
+        } else {
+            // recursive case: halve the search space and adjust lower and upper
+            // depending on the result for `resources[mid]`
+            val mid = (upper + lower) / 2
+            val res = resources[mid]
+            logger.info { "Running experiment with load '${load.get()}' and resources '${res.get()}'" }
+            if (this.benchmarkExecutor.runExperiment(load, resources[mid])) {
+                if (mid == lower) {
+                    return lower
+                }
+                return binarySearch(load, resources, lower, mid - 1)
+            } else {
+                return binarySearch(load, resources, mid + 1, upper)
+            }
+        }
+    }
+}
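+
+// Worked example (hypothetical outcomes): for resources [1, 2, 3, 4, 5, 6] where
+// experiments succeed from 4 instances upwards, binarySearch(load, resources, 0, 5)
+// tests index 2 (fails), then index 4 (succeeds), then index 3 (succeeds) and
+// returns 3, so findSuitableResource yields the 4-instance Resource. Note that the
+// search assumes monotonic experiment results.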
diff --git a/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/CompositeStrategy.kt b/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/CompositeStrategy.kt
new file mode 100644
index 0000000000000000000000000000000000000000..41cc5c325163ade54469398e815fdb8d95c6e6cd
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/CompositeStrategy.kt
@@ -0,0 +1,30 @@
+package theodolite.strategies.searchstrategy
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+import theodolite.execution.BenchmarkExecutor
+import theodolite.strategies.restriction.RestrictionStrategy
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+/**
+ *  Composite strategy that combines a SearchStrategy and a set of RestrictionStrategy.
+ *
+ * @param searchStrategy the [SearchStrategy] that is executed as part of this [CompositeStrategy].
+ * @param restrictionStrategies the set of [RestrictionStrategy] that are applied conjunctively to restrict the [Resource]s
+ * @param benchmarkExecutor Benchmark executor which runs the individual benchmarks.
+ */
+@RegisterForReflection
+class CompositeStrategy(
+    benchmarkExecutor: BenchmarkExecutor,
+    private val searchStrategy: SearchStrategy,
+    val restrictionStrategies: Set<RestrictionStrategy>
+) : SearchStrategy(benchmarkExecutor) {
+
+    override fun findSuitableResource(load: LoadDimension, resources: List<Resource>): Resource? {
+        var restrictedResources = resources.toList()
+        for (strategy in this.restrictionStrategies) {
+            restrictedResources = restrictedResources.intersect(strategy.apply(load, resources)).toList()
+        }
+        return this.searchStrategy.findSuitableResource(load, restrictedResources)
+    }
+}
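+
+// Usage sketch (assumption: `executor`, `searchStrategy` and `restrictions` were
+// created via StrategyFactory as sketched there):
+//
+//   val strategy = CompositeStrategy(executor, searchStrategy, restrictions)
+//   val suitable: Resource? = strategy.findSuitableResource(load, resources)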
diff --git a/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/FullSearch.kt b/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/FullSearch.kt
new file mode 100644
index 0000000000000000000000000000000000000000..cb0dd2d8ab528e42e8290f59f26c8b9b32f384c7
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/FullSearch.kt
@@ -0,0 +1,31 @@
+package theodolite.strategies.searchstrategy
+
+import mu.KotlinLogging
+import theodolite.execution.BenchmarkExecutor
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * [SearchStrategy] that executes an experiment for each provided resource in a linear-search-like fashion, but **without
+ * stopping** once a suitable resource amount is found.
+ *
+ * @see LinearSearch for a SearchStrategy that stops once a suitable resource amount is found.
+ *
+ * @param benchmarkExecutor Benchmark executor which runs the individual benchmarks.
+ */
+class FullSearch(benchmarkExecutor: BenchmarkExecutor) : SearchStrategy(benchmarkExecutor) {
+
+    override fun findSuitableResource(load: LoadDimension, resources: List<Resource>): Resource? {
+        var minimalSuitableResources: Resource? = null
+        for (res in resources) {
+            logger.info { "Running experiment with load '${load.get()}' and resources '${res.get()}'" }
+            val result = this.benchmarkExecutor.runExperiment(load, res)
+            if (result && minimalSuitableResources != null) {
+                minimalSuitableResources = res
+            }
+        }
+        return minimalSuitableResources
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/LinearSearch.kt b/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/LinearSearch.kt
new file mode 100644
index 0000000000000000000000000000000000000000..85deaf6fa75437199bfc560404eb5b40bb4a986a
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/LinearSearch.kt
@@ -0,0 +1,25 @@
+package theodolite.strategies.searchstrategy
+
+import mu.KotlinLogging
+import theodolite.execution.BenchmarkExecutor
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ *  Linear-search-like implementation for determining the smallest suitable number of instances.
+ *
+ * @param benchmarkExecutor Benchmark executor which runs the individual benchmarks.
+ */
+class LinearSearch(benchmarkExecutor: BenchmarkExecutor) : SearchStrategy(benchmarkExecutor) {
+
+    override fun findSuitableResource(load: LoadDimension, resources: List<Resource>): Resource? {
+        for (res in resources) {
+            logger.info { "Running experiment with load '${load.get()}' and resources '${res.get()}'" }
+            if (this.benchmarkExecutor.runExperiment(load, res)) return res
+        }
+        return null
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/SearchStrategy.kt b/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/SearchStrategy.kt
new file mode 100644
index 0000000000000000000000000000000000000000..4e304b010d4d56f6b5fe734a6b977361f93e57a1
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/strategies/searchstrategy/SearchStrategy.kt
@@ -0,0 +1,24 @@
+package theodolite.strategies.searchstrategy
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+import theodolite.execution.BenchmarkExecutor
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+/**
+ *  Base class for implementations of SearchStrategies. A SearchStrategy determines the smallest suitable number of instances.
+ *
+ * @param benchmarkExecutor Benchmark executor which runs the individual benchmarks.
+ */
+@RegisterForReflection
+abstract class SearchStrategy(val benchmarkExecutor: BenchmarkExecutor) {
+    /**
+     * Find smallest suitable resource from the specified resource list for the given load.
+     *
+     * @param load the [LoadDimension] to be tested.
+     * @param resources List of all possible [Resource]s.
+     *
+     * @return suitable resource for the specified load, or null if no suitable resource exists.
+     */
+    abstract fun findSuitableResource(load: LoadDimension, resources: List<Resource>): Resource?
+}
diff --git a/theodolite/src/main/kotlin/theodolite/util/Config.kt b/theodolite/src/main/kotlin/theodolite/util/Config.kt
new file mode 100644
index 0000000000000000000000000000000000000000..afbf784e9d6d72939615e367b54891ecd95a3608
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/Config.kt
@@ -0,0 +1,18 @@
+package theodolite.util
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+import theodolite.strategies.searchstrategy.CompositeStrategy
+
+/**
+ * Config class that represents a configuration of a Theodolite run.
+ *
+ * @param loads the [LoadDimension]s of the execution
+ * @param resources the [Resource]s of the execution
+ * @param compositeStrategy the [CompositeStrategy] of the execution
+ */
+@RegisterForReflection
+data class Config(
+    val loads: List<LoadDimension>,
+    val resources: List<Resource>,
+    val compositeStrategy: CompositeStrategy
+)
diff --git a/theodolite/src/main/kotlin/theodolite/util/Configuration.kt b/theodolite/src/main/kotlin/theodolite/util/Configuration.kt
new file mode 100644
index 0000000000000000000000000000000000000000..dac3b943e69bd7e208d318f2a788275f19db11e4
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/Configuration.kt
@@ -0,0 +1,18 @@
+package theodolite.util
+
+import theodolite.execution.ExecutionModes
+
+// Defaults
+private const val DEFAULT_NAMESPACE = "default"
+private const val DEFAULT_COMPONENT_NAME = "theodolite-operator"
+
+/**
+ * Provides configuration values for a Theodolite deployment, read from
+ * environment variables with defaults.
+ */
+class Configuration {
+    companion object {
+        val NAMESPACE = System.getenv("NAMESPACE") ?: DEFAULT_NAMESPACE
+        val COMPONENT_NAME = System.getenv("COMPONENT_NAME") ?: DEFAULT_COMPONENT_NAME
+        val EXECUTION_MODE = System.getenv("MODE") ?: ExecutionModes.STANDALONE.value
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/util/ConfigurationOverride.kt b/theodolite/src/main/kotlin/theodolite/util/ConfigurationOverride.kt
new file mode 100644
index 0000000000000000000000000000000000000000..537b44721bb344c2cd7af71d29dc4fa3da5a7a33
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/ConfigurationOverride.kt
@@ -0,0 +1,21 @@
+package theodolite.util
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+/**
+ * Representation of a configuration override.
+ */
+@JsonDeserialize
+@RegisterForReflection
+class ConfigurationOverride {
+    /**
+     * Patcher of the configuration override.
+     */
+    lateinit var patcher: PatcherDefinition
+
+    /**
+     * Value of the patched configuration override.
+     */
+    lateinit var value: String
+}
diff --git a/theodolite/src/main/kotlin/theodolite/util/DeploymentFailedException.kt b/theodolite/src/main/kotlin/theodolite/util/DeploymentFailedException.kt
new file mode 100644
index 0000000000000000000000000000000000000000..9f4caedf3db1e09dca7924bf0035c6ace0b835d7
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/DeploymentFailedException.kt
@@ -0,0 +1,4 @@
+package theodolite.util
+
+open class DeploymentFailedException(message: String, e: Exception? = null) : TheodoliteException(message, e)
diff --git a/theodolite/src/main/kotlin/theodolite/util/EvaluationFailedException.kt b/theodolite/src/main/kotlin/theodolite/util/EvaluationFailedException.kt
new file mode 100644
index 0000000000000000000000000000000000000000..c67ed7ffd79afc733a97dae05c3203f8e78722ea
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/EvaluationFailedException.kt
@@ -0,0 +1,4 @@
+package theodolite.util
+
+class EvaluationFailedException(message: String, e: Exception? = null) : ExecutionFailedException(message, e)
diff --git a/theodolite/src/main/kotlin/theodolite/util/ExecutionFailedException.kt b/theodolite/src/main/kotlin/theodolite/util/ExecutionFailedException.kt
new file mode 100644
index 0000000000000000000000000000000000000000..6566a451a3e273214f59962531b6bd17b33a850d
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/ExecutionFailedException.kt
@@ -0,0 +1,4 @@
+package theodolite.util
+
+open class ExecutionFailedException(message: String, e: Exception? = null) : TheodoliteException(message, e)
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/util/ExecutionStateComparator.kt b/theodolite/src/main/kotlin/theodolite/util/ExecutionStateComparator.kt
new file mode 100644
index 0000000000000000000000000000000000000000..8a6b0e9a49362afa401cf3c1279e7f7f6cddf85d
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/ExecutionStateComparator.kt
@@ -0,0 +1,19 @@
+package theodolite.util
+
+import theodolite.model.crd.ExecutionCRD
+import theodolite.model.crd.ExecutionStates
+
+/**
+ * Simple comparator which can be used to order a list of [ExecutionCRD]s such that executions
+ * with the preferred status (e.g. [ExecutionStates.RESTART]) come before all other executions.
+ */
+class ExecutionStateComparator(private val preferredState: ExecutionStates): Comparator<ExecutionCRD> {
+
+    override fun compare(p0: ExecutionCRD, p1: ExecutionCRD): Int {
+        val p0Preferred = p0.status.executionState == preferredState.value
+        val p1Preferred = p1.status.executionState == preferredState.value
+        return when {
+            // a consistent comparator must treat equally-preferred executions as equal
+            p0Preferred == p1Preferred -> 0
+            p0Preferred -> -1
+            else -> 1
+        }
+    }
+}
\ No newline at end of file
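+
+// Usage sketch (assumption: `executions` is a list of ExecutionCRDs fetched from the cluster):
+//
+//   val ordered = executions.sortedWith(ExecutionStateComparator(ExecutionStates.RESTART))
+//   // moves all executions in the RESTART state to the front of the list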
diff --git a/theodolite/src/main/kotlin/theodolite/util/IOHandler.kt b/theodolite/src/main/kotlin/theodolite/util/IOHandler.kt
new file mode 100644
index 0000000000000000000000000000000000000000..57032189412d0937e4d77ddbf4354c78ffcc71a3
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/IOHandler.kt
@@ -0,0 +1,94 @@
+package theodolite.util
+
+import com.google.gson.GsonBuilder
+import mu.KotlinLogging
+import java.io.File
+import java.io.PrintWriter
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * The IOHandler handles most common I/O operations within the Theodolite framework
+ */
+class IOHandler {
+
+    /**
+     * The location in which Theodolite stores result and configuration files depends on
+     * the values of the environment variables `RESULTS_FOLDER` and `CREATE_RESULTS_FOLDER`.
+     *
+     * @return the path of the result folder, with a trailing slash if non-empty
+     */
+    fun getResultFolderURL(): String {
+        var resultsFolder: String = System.getenv("RESULTS_FOLDER") ?: ""
+        val createResultsFolder = System.getenv("CREATE_RESULTS_FOLDER") ?: "false"
+
+        if (resultsFolder != "") {
+            logger.info { "RESULT_FOLDER: $resultsFolder" }
+            val directory = File(resultsFolder)
+            if (!directory.exists()) {
+                logger.error { "Folder $resultsFolder does not exist" }
+                if (createResultsFolder.toBoolean()) {
+                    directory.mkdirs()
+                } else {
+                    throw IllegalArgumentException("Result folder not found")
+                }
+            }
+            resultsFolder += "/"
+        }
+        return resultsFolder
+    }
+
+    /**
+     * Read a file as String
+     *
+     * @param fileURL the URL of the file
+     * @return The content of the file as String
+     */
+    fun readFileAsString(fileURL: String): String {
+        return File(fileURL).inputStream().readBytes().toString(Charsets.UTF_8).trim()
+    }
+
+    /**
+     * Creates a JSON string of the given object and stores it in a file.
+     *
+     * @param T class of the object to save
+     * @param objectToSave object which should be saved as file
+     * @param fileURL the URL of the file
+     */
+    fun <T> writeToJSONFile(objectToSave: T, fileURL: String) {
+        val gson = GsonBuilder().enableComplexMapKeySerialization().setPrettyPrinting().create()
+        writeStringToTextFile(fileURL, gson.toJson(objectToSave))
+    }
+
+    /**
+     * Write to CSV file
+     *
+     * @param fileURL the URL of the file
+     * @param data  the data to write in the file, as list of list, each subList corresponds to a row in the CSV file
+     * @param columns columns of the CSV file
+     */
+    fun writeToCSVFile(fileURL: String, data: List<List<String>>, columns: List<String>) {
+        val outputFile = File("$fileURL.csv")
+        PrintWriter(outputFile).use { pw ->
+            pw.println(columns.joinToString(separator = ","))
+            data.forEach {
+                pw.println(it.joinToString(separator = ","))
+            }
+        }
+        logger.info { "Wrote CSV file: $fileURL to ${outputFile.absolutePath}." }
+    }
+
+    /**
+     * Write to text file
+     *
+     * @param fileURL the URL of the file
+     * @param data the data to write in the file as String
+     */
+    fun writeStringToTextFile(fileURL: String, data: String) {
+        val outputFile = File("$fileURL")
+        outputFile.printWriter().use {
+            it.println(data)
+        }
+        logger.info { "Wrote txt file: $fileURL to ${outputFile.absolutePath}." }
+    }
+}
\ No newline at end of file
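+
+// Usage sketch (hypothetical file name and data; the result folder is resolved from
+// the environment as described above):
+//
+//   val io = IOHandler()
+//   val folder = io.getResultFolderURL()
+//   io.writeToCSVFile(
+//       fileURL = "${folder}exp0_demo",
+//       data = listOf(listOf("group", "1620000000", "42.0")),
+//       columns = listOf("group", "timestamp", "value")
+//   )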
diff --git a/theodolite/src/main/kotlin/theodolite/util/InvalidPatcherConfigurationException.kt b/theodolite/src/main/kotlin/theodolite/util/InvalidPatcherConfigurationException.kt
new file mode 100644
index 0000000000000000000000000000000000000000..d02948ad341207051c4653ba9400ac0ffe5b03aa
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/InvalidPatcherConfigurationException.kt
@@ -0,0 +1,3 @@
+package theodolite.util
+
+class InvalidPatcherConfigurationException(message: String, e: Exception? = null) : DeploymentFailedException(message,e)
diff --git a/theodolite/src/main/kotlin/theodolite/util/KafkaConfig.kt b/theodolite/src/main/kotlin/theodolite/util/KafkaConfig.kt
new file mode 100644
index 0000000000000000000000000000000000000000..4e72ccb0d86749a6538c26556241ac114ef8d9a4
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/KafkaConfig.kt
@@ -0,0 +1,69 @@
+package theodolite.util
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.quarkus.runtime.annotations.RegisterForReflection
+import theodolite.util.KafkaConfig.TopicWrapper
+import kotlin.properties.Delegates
+import kotlin.reflect.KProperty
+
+/**
+ * Configuration of Kafka connection.
+ *
+ * @see TopicWrapper
+ */
+@RegisterForReflection
+@JsonDeserialize
+class KafkaConfig {
+    /**
+     * The bootstrap server connection string
+     */
+    lateinit var bootstrapServer: String
+
+    /**
+     * The list of topics
+     */
+    lateinit var topics: List<TopicWrapper>
+
+    /**
+     * Wrapper for a topic definition.
+     */
+    @RegisterForReflection
+    @JsonDeserialize
+    class TopicWrapper {
+        /**
+         * The topic name
+         */
+        lateinit var name: String
+
+        /**
+         * The number of partitions
+         */
+        var numPartitions by Delegates.notNull<Int>()
+
+        /**
+         * The replication factor of this topic
+         */
+        var replicationFactor by Delegates.notNull<Short>()
+
+        /**
+         * If removeOnly is set, this topic is only used to delete all topics whose names start with this topic's name.
+         */
+        var removeOnly by DelegatesFalse()
+    }
+}
+
+/**
+ * Property delegate that initializes a Boolean property with false (Kotlin does not allow `lateinit` for primitive types).
+ */
+@RegisterForReflection
+class DelegatesFalse {
+    private var state = false
+    operator fun getValue(thisRef: Any?, property: KProperty<*>): Boolean {
+        return state
+    }
+
+    operator fun setValue(thisRef: Any?, property: KProperty<*>, value: Boolean) {
+        state = value
+    }
+}
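+
+// Example of a YAML snippet (illustrative values) that deserializes into KafkaConfig:
+//
+//   bootstrapServer: "theodolite-cp-kafka:9092"
+//   topics:
+//     - name: "input"
+//       numPartitions: 40
+//       replicationFactor: 1
+//     - name: "theodolite-"
+//       removeOnly: true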
diff --git a/theodolite/src/main/kotlin/theodolite/util/LoadDimension.kt b/theodolite/src/main/kotlin/theodolite/util/LoadDimension.kt
new file mode 100644
index 0000000000000000000000000000000000000000..cf26da979b05f0a2bd82289ce371715ea0d67c93
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/LoadDimension.kt
@@ -0,0 +1,26 @@
+package theodolite.util
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+/**
+ * Representation of the load dimension for an execution of Theodolite.
+ *
+ * @param number the value of this [LoadDimension]
+ * @param type the list of [PatcherDefinition]s of this [LoadDimension]
+ */
+@RegisterForReflection
+data class LoadDimension(private val number: Int, private val type: List<PatcherDefinition>) {
+    /**
+     * @return the value of this load dimension.
+     */
+    fun get(): Int {
+        return this.number
+    }
+
+    /**
+     * @return the list of [PatcherDefinition]
+     */
+    fun getType(): List<PatcherDefinition> {
+        return this.type
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/util/Parser.kt b/theodolite/src/main/kotlin/theodolite/util/Parser.kt
new file mode 100644
index 0000000000000000000000000000000000000000..e435b1cbbf18b9f860ceda69f5f7ec66e64c9375
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/Parser.kt
@@ -0,0 +1,16 @@
+package theodolite.util
+
+/**
+ * Interface for parsers.
+ * A parser allows the reading of files and creates a corresponding object from them.
+ */
+interface Parser {
+    /**
+     * Parse a file.
+     *
+     * @param path The path of the file
+     * @param E The class of the type to parse
+     * @param T The type to parse
+     */
+    fun <T> parse(path: String, E: Class<T>): T?
+}
diff --git a/theodolite/src/main/kotlin/theodolite/util/PatcherDefinition.kt b/theodolite/src/main/kotlin/theodolite/util/PatcherDefinition.kt
new file mode 100644
index 0000000000000000000000000000000000000000..6ec0cce36751ec0343d40aa49fefa44f4c7fc918
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/PatcherDefinition.kt
@@ -0,0 +1,25 @@
+package theodolite.util
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import com.fasterxml.jackson.databind.annotation.JsonSerialize
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+/**
+ * Definition of the structure of a [theodolite.patcher.AbstractPatcher] which implements the [theodolite.patcher.Patcher] interface.
+ */
+@JsonDeserialize
+@RegisterForReflection
+class PatcherDefinition {
+    /**
+     * The type of the patcher
+     */
+    lateinit var type: String
+
+    /**
+     * The resource which the patcher is applied to
+     */
+    lateinit var resource: String
+
+    @JsonSerialize
+    lateinit var properties: MutableMap<String, String>
+}
diff --git a/theodolite/src/main/kotlin/theodolite/util/PrometheusResponse.kt b/theodolite/src/main/kotlin/theodolite/util/PrometheusResponse.kt
new file mode 100644
index 0000000000000000000000000000000000000000..9b0b0dd4e0a5a48072ca576e874cb850c5f8df3b
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/PrometheusResponse.kt
@@ -0,0 +1,72 @@
+package theodolite.util
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+import java.util.*
+
+/**
+ * This class corresponds to the JSON response format of a Prometheus
+ * [range-query](https://www.prometheus.io/docs/prometheus/latest/querying/api/#range-queries)
+ */
+@RegisterForReflection
+data class PrometheusResponse(
+    /**
+     * Indicates whether the query was successful.
+     */
+    var status: String? = null,
+    /**
+     * The data section of the query result contains the information about the resultType and the values itself.
+     */
+    var data: PromData? = null
+) {
+    /**
+     * Return the data of the PrometheusResponse as [List] of [List]s of [String]s
+     * The format of the returned list is: `[[ group, timestamp, value ], [ group, timestamp, value ], ... ]`
+     */
+    fun getResultAsList(): List<List<String>> {
+        val group = data?.result?.get(0)?.metric?.toString()!!
+        val values = data?.result?.get(0)?.values
+        val result = mutableListOf<List<String>>()
+
+        if (values != null) {
+            for (value in values) {
+                val valueList = value as List<*>
+                val timestamp = (valueList[0] as Double).toLong().toString()
+                val sampleValue = valueList[1].toString()  // do not shadow the loop variable
+                result.add(listOf(group, timestamp, sampleValue))
+            }
+        }
+        return Collections.unmodifiableList(result)
+    }
+}
+
+/**
+ * Description of Prometheus data.
+ *
+ * Based on [PromResult]
+ */
+@RegisterForReflection
+data class PromData(
+    /**
+     * Type of the result, either "matrix", "vector", "scalar" or "string"
+     */
+    var resultType: String? = null,
+    /**
+     * Result of the range-query. In the case of range-query this corresponds to the [range-vectors result format](https://www.prometheus.io/docs/prometheus/latest/querying/api/#range-vectors)
+     */
+    var result: List<PromResult>? = null
+)
+
+/**
+ * PromResult corresponds to the [range-vectors result format](https://www.prometheus.io/docs/prometheus/latest/querying/api/#range-vectors)
+ */
+@RegisterForReflection
+data class PromResult(
+    /**
+     * Label of the metric
+     */
+    var metric: Map<String, String>? = null,
+    /**
+     *  Values of the metric (e.g. [ [ <unix_time>, "<sample_value>" ], ... ])
+     */
+    var values: List<Any>? = null
+)
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/util/Resource.kt b/theodolite/src/main/kotlin/theodolite/util/Resource.kt
new file mode 100644
index 0000000000000000000000000000000000000000..1d6410aa4288e19817e3ba48bfd1bc0d85d006a2
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/Resource.kt
@@ -0,0 +1,24 @@
+package theodolite.util
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+/**
+ * Representation of the resources for an execution of Theodolite.
+ */
+@RegisterForReflection
+data class Resource(private val number: Int, private val type: List<PatcherDefinition>) {
+
+    /**
+     * @return the value of this resource.
+     */
+    fun get(): Int {
+        return this.number
+    }
+
+    /**
+     * @return the list of [PatcherDefinition]
+     */
+    fun getType(): List<PatcherDefinition> {
+        return this.type
+    }
+}
diff --git a/theodolite/src/main/kotlin/theodolite/util/Results.kt b/theodolite/src/main/kotlin/theodolite/util/Results.kt
new file mode 100644
index 0000000000000000000000000000000000000000..60641ea0248435de53aaaaf362da7be995b391c5
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/Results.kt
@@ -0,0 +1,86 @@
+package theodolite.util
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+/**
+ * Central class that saves the state of an execution of Theodolite. For an execution, it is used to save the result of
+ * individual experiments. Further, it is used by the
+ * [theodolite.strategies.restriction.RestrictionStrategy] to restrict the resource search space.
+ */
+@RegisterForReflection
+class Results {
+    private val results: MutableMap<Pair<LoadDimension, Resource>, Boolean> = mutableMapOf()
+
+    /**
+     * Set the result for an experiment.
+     *
+     * @param experiment A pair that identifies the experiment by the [LoadDimension] and [Resource].
+     * @param successful the result of the experiment. Successful == true and Unsuccessful == false.
+     */
+    fun setResult(experiment: Pair<LoadDimension, Resource>, successful: Boolean) {
+        this.results[experiment] = successful
+    }
+
+    /**
+     * Get the result for an experiment.
+     *
+     * @param experiment A pair that identifies the experiment by the [LoadDimension] and [Resource].
+     * @return true if the experiment was successful and false otherwise. If the result has not been reported so far,
+     * null is returned.
+     *
+     * @see Resource
+     */
+    fun getResult(experiment: Pair<LoadDimension, Resource>): Boolean? {
+        return this.results[experiment]
+    }
+
+    /**
+     * Get the smallest suitable number of instances for a specified [LoadDimension].
+     *
+     * @param load the [LoadDimension]
+     *
+     * @return the smallest suitable number of resources. If there are results but no successful
+     * experiment for the requested load, a [Resource] with value Int.MAX_VALUE is returned.
+     * If no experiments have been marked as either successful or unsuccessful
+     * yet, a [Resource] with value Int.MIN_VALUE is returned.
+     */
+    fun getMinRequiredInstances(load: LoadDimension?): Resource? {
+        if (this.results.isEmpty()) {
+            return Resource(Int.MIN_VALUE, emptyList())
+        }
+
+        var minRequiredInstances: Resource? = Resource(Int.MAX_VALUE, emptyList())
+        for (experiment in results) {
+            // Get all successful experiments for requested load
+            if (experiment.key.first == load && experiment.value) {
+                if (minRequiredInstances == null || experiment.key.second.get() < minRequiredInstances.get()) {
+                    // Found new smallest resources
+                    minRequiredInstances = experiment.key.second
+                }
+            }
+        }
+        return minRequiredInstances
+    }
+
+    /**
+     * Get the largest [LoadDimension] that has been benchmarked (successfully or unsuccessfully)
+     * so far and is smaller than or equal to the given [LoadDimension].
+     *
+     * @param load the [LoadDimension]
+     *
+     * @return the largest benchmarked [LoadDimension], or null if no such [LoadDimension] exists
+     */
+    fun getMaxBenchmarkedLoad(load: LoadDimension): LoadDimension? {
+        var maxBenchmarkedLoad: LoadDimension? = null
+        for (experiment in results) {
+            if (experiment.key.first.get() <= load.get()) {
+                if (maxBenchmarkedLoad == null) {
+                    maxBenchmarkedLoad = experiment.key.first
+                } else if (maxBenchmarkedLoad.get() < experiment.key.first.get()) {
+                    maxBenchmarkedLoad = experiment.key.first
+                }
+            }
+        }
+        return maxBenchmarkedLoad
+    }
+}
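+
+// Usage sketch (assumption: `load`, `res2` and `res3` are LoadDimension/Resource
+// instances created by the runner):
+//
+//   val results = Results()
+//   results.setResult(Pair(load, res2), false)
+//   results.setResult(Pair(load, res3), true)
+//   results.getMinRequiredInstances(load)  // returns res3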
diff --git a/theodolite/src/main/kotlin/theodolite/util/TheodoliteException.kt b/theodolite/src/main/kotlin/theodolite/util/TheodoliteException.kt
new file mode 100644
index 0000000000000000000000000000000000000000..fc7453bae6aaa4c5c526eee72c006562ea887eb5
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/TheodoliteException.kt
@@ -0,0 +1,3 @@
+package theodolite.util
+
+open class TheodoliteException(message: String, e: Exception? = null) : Exception(message, e)
\ No newline at end of file
diff --git a/theodolite/src/main/kotlin/theodolite/util/TypeName.kt b/theodolite/src/main/kotlin/theodolite/util/TypeName.kt
new file mode 100644
index 0000000000000000000000000000000000000000..f20fc7c9ce6757be75d9317e76c23a68b09914bd
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/TypeName.kt
@@ -0,0 +1,14 @@
+package theodolite.util
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+/**
+ * The TypeName encapsulates a list of [PatcherDefinition]s along with a typeName that specifies for which type the [PatcherDefinition]s should be used.
+ */
+@RegisterForReflection
+@JsonDeserialize
+class TypeName {
+    lateinit var typeName: String
+    lateinit var patchers: List<PatcherDefinition>
+}
diff --git a/theodolite/src/main/kotlin/theodolite/util/YamlParserFromFile.kt b/theodolite/src/main/kotlin/theodolite/util/YamlParserFromFile.kt
new file mode 100644
index 0000000000000000000000000000000000000000..ae36349e628621bb7ad287d8cf557fbefa3ff5c5
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/YamlParserFromFile.kt
@@ -0,0 +1,18 @@
+package theodolite.util
+
+import org.yaml.snakeyaml.Yaml
+import org.yaml.snakeyaml.constructor.Constructor
+import java.io.File
+import java.io.FileInputStream
+
+/**
+ * The YamlParserFromFile parses a YAML file into an object of a given type.
+ */
+class YamlParserFromFile : Parser {
+    override fun <T> parse(path: String, E: Class<T>): T? {
+        val parser = Yaml(Constructor(E))
+        // Use `use` so the file stream is closed even if parsing fails.
+        return FileInputStream(File(path)).use { input -> parser.loadAs(input, E) }
+    }
+}
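+// Usage sketch (assuming a YAML file matching the target class exists on disk):
+//
+//   val execution = YamlParserFromFile().parse("example.yaml", BenchmarkExecution::class.java)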
diff --git a/theodolite/src/main/kotlin/theodolite/util/YamlParserFromString.kt b/theodolite/src/main/kotlin/theodolite/util/YamlParserFromString.kt
new file mode 100644
index 0000000000000000000000000000000000000000..61db189ee99fa5fe36113b0fdecf589ad1114852
--- /dev/null
+++ b/theodolite/src/main/kotlin/theodolite/util/YamlParserFromString.kt
@@ -0,0 +1,17 @@
+package theodolite.util
+
+import org.yaml.snakeyaml.Yaml
+import org.yaml.snakeyaml.constructor.Constructor
+
+/**
+ * The YamlParserFromString parses a YAML string into an object of a given type.
+ */
+class YamlParserFromString : Parser {
+    override fun <T> parse(fileString: String, E: Class<T>): T? {
+        val parser = Yaml(Constructor(E))
+        return parser.loadAs(fileString, E)
+    }
+}
diff --git a/theodolite/src/main/resources/application.properties b/theodolite/src/main/resources/application.properties
new file mode 100644
index 0000000000000000000000000000000000000000..42647e2391706286602945cf2be7baa96857ba19
--- /dev/null
+++ b/theodolite/src/main/resources/application.properties
@@ -0,0 +1,6 @@
+quarkus.native.additional-build-args=\
+  --initialize-at-run-time=io.fabric8.kubernetes.client.internal.CertUtils,\
+  --initialize-at-run-time=io.fabric8.kubernetes.client.dsl.internal.uploadable.PodUpload,\
+  --initialize-at-run-time=io.fabric8.kubernetes.client.dsl.internal.core.v1.PodOperationsImpl$1,\
+  --initialize-at-run-time=io.fabric8.kubernetes.client.dsl.internal.core.v1.PodOperationsImpl$3,\
+  --report-unsupported-elements-at-runtime
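+# Note: deferring the classes above to run-time initialization is a common GraalVM
+# native-image requirement for code that reads certificates or platform resources
+# during static initialization (assumption; not verified for these exact classes).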
diff --git a/theodolite/src/main/resources/operator/example-execution-k8s-resource.yaml b/theodolite/src/main/resources/operator/example-execution-k8s-resource.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b81bbcd442834136283dc080f5f6a79bbc1cd415
--- /dev/null
+++ b/theodolite/src/main/resources/operator/example-execution-k8s-resource.yaml
@@ -0,0 +1,29 @@
+apiVersion: theodolite.com/v1
+kind: execution
+metadata:
+  name: theodolite-example-execution
+spec:  
+  benchmark: uc1-kstreams
+  load:  
+    loadType: "NumSensors"
+    loadValues:
+      - 50000 
+  resources:
+    resourceType: "Instances"
+    resourceValues:
+      - 1
+  slos:
+    - sloType: "lag trend"
+      threshold: 1000
+      prometheusUrl: "http://localhost:32656"
+      externalSloUrl: "http://localhost:80/evaluate-slope"
+      offset: 0
+      warmup: 0
+  execution:
+    strategy: "LinearSearch"
+    duration: 60
+    repetitions: 1
+    loadGenerationDelay: 30 # in seconds
+    restrictions:
+      - "LowerBound"
+  configOverrides: []
\ No newline at end of file
diff --git a/theodolite/src/test/kotlin/theodolite/CompositeStrategyTest.kt b/theodolite/src/test/kotlin/theodolite/CompositeStrategyTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..580d9e747bde687a91ffb1bce2e7c9dfb6f166a2
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/CompositeStrategyTest.kt
@@ -0,0 +1,117 @@
+package theodolite
+
+import io.quarkus.test.junit.QuarkusTest
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Test
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.strategies.restriction.LowerBoundRestriction
+import theodolite.strategies.searchstrategy.BinarySearch
+import theodolite.strategies.searchstrategy.CompositeStrategy
+import theodolite.strategies.searchstrategy.LinearSearch
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import theodolite.util.Results
+
+@QuarkusTest
+class CompositeStrategyTest {
+
+    @Test
+    fun testEnd2EndLinearSearch() {
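+        // mockResults[load][resource] is true if the experiment with that load and
+        // that number of resources succeeds (cf. TestBenchmarkExecutorImpl.runExperiment).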
+        val mockResults = arrayOf(
+            arrayOf(true, true, true, true, true, true, true),
+            arrayOf(false, false, true, true, true, true, true),
+            arrayOf(false, false, true, true, true, true, true),
+            arrayOf(false, false, false, true, true, true, true),
+            arrayOf(false, false, false, false, true, true, true),
+            arrayOf(false, false, false, false, false, false, true),
+            arrayOf(false, false, false, false, false, false, false)
+        )
+        val mockLoads: List<LoadDimension> = (0..6).map { number -> LoadDimension(number, emptyList()) }
+        val mockResources: List<Resource> = (0..6).map { number -> Resource(number, emptyList()) }
+        val results = Results()
+        val benchmark = TestBenchmark()
+        val sloChecker: BenchmarkExecution.Slo = BenchmarkExecution.Slo()
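+        // Trailing arguments: executionId = 0, loadGenerationDelay = 0, afterTeardownDelay = 5
+        // (matching the parameter order of TestBenchmarkExecutorImpl's constructor).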
+        val benchmarkExecutor = TestBenchmarkExecutorImpl(mockResults, benchmark, results, listOf(sloChecker), 0, 0, 5)
+        val linearSearch = LinearSearch(benchmarkExecutor)
+        val lowerBoundRestriction = LowerBoundRestriction(results)
+        val strategy =
+            CompositeStrategy(benchmarkExecutor, linearSearch, setOf(lowerBoundRestriction))
+
+        val actual: ArrayList<Resource?> = ArrayList()
+        val expected: ArrayList<Resource?> = ArrayList(listOf(0, 2, 2, 3, 4, 6).map { x -> Resource(x, emptyList()) })
+        expected.add(null)
+
+        for (load in mockLoads) {
+            actual.add(strategy.findSuitableResource(load, mockResources))
+        }
+
+        assertEquals(expected, actual)
+    }
+
+    @Test
+    fun testEnd2EndBinarySearch() {
+        val mockResults = arrayOf(
+            arrayOf(true, true, true, true, true, true, true),
+            arrayOf(false, false, true, true, true, true, true),
+            arrayOf(false, false, true, true, true, true, true),
+            arrayOf(false, false, false, true, true, true, true),
+            arrayOf(false, false, false, false, true, true, true),
+            arrayOf(false, false, false, false, false, false, true),
+            arrayOf(false, false, false, false, false, false, false)
+        )
+        val mockLoads: List<LoadDimension> = (0..6).map { number -> LoadDimension(number, emptyList()) }
+        val mockResources: List<Resource> = (0..6).map { number -> Resource(number, emptyList()) }
+        val results = Results()
+        val benchmark = TestBenchmark()
+        val sloChecker: BenchmarkExecution.Slo = BenchmarkExecution.Slo()
+        val benchmarkExecutorImpl =
+            TestBenchmarkExecutorImpl(mockResults, benchmark, results, listOf(sloChecker), 0, 0, 0)
+        val binarySearch = BinarySearch(benchmarkExecutorImpl)
+        val lowerBoundRestriction = LowerBoundRestriction(results)
+        val strategy =
+            CompositeStrategy(benchmarkExecutorImpl, binarySearch, setOf(lowerBoundRestriction))
+
+        val actual: ArrayList<Resource?> = ArrayList()
+        val expected: ArrayList<Resource?> = ArrayList(listOf(0, 2, 2, 3, 4, 6).map { x -> Resource(x, emptyList()) })
+        expected.add(null)
+
+        for (load in mockLoads) {
+            actual.add(strategy.findSuitableResource(load, mockResources))
+        }
+
+        assertEquals(expected, actual)
+    }
+
+    @Test
+    fun testEnd2EndBinarySearch2() {
+        val mockResults = arrayOf(
+            arrayOf(true, true, true, true, true, true, true, true),
+            arrayOf(false, false, true, true, true, true, true, true),
+            arrayOf(false, false, true, true, true, true, true, true),
+            arrayOf(false, false, false, true, true, true, true, true),
+            arrayOf(false, false, false, false, true, true, true, true),
+            arrayOf(false, false, false, false, false, false, true, true),
+            arrayOf(false, false, false, false, false, false, false, true)
+        )
+        val mockLoads: List<LoadDimension> = (0..6).map { number -> LoadDimension(number, emptyList()) }
+        val mockResources: List<Resource> = (0..7).map { number -> Resource(number, emptyList()) }
+        val results = Results()
+        val benchmark = TestBenchmark()
+        val sloChecker: BenchmarkExecution.Slo = BenchmarkExecution.Slo()
+        val benchmarkExecutor = TestBenchmarkExecutorImpl(mockResults, benchmark, results, listOf(sloChecker), 0, 0, 0)
+        val binarySearch = BinarySearch(benchmarkExecutor)
+        val lowerBoundRestriction = LowerBoundRestriction(results)
+        val strategy =
+            CompositeStrategy(benchmarkExecutor, binarySearch, setOf(lowerBoundRestriction))
+
+        val actual: ArrayList<Resource?> = ArrayList()
+        val expected: ArrayList<Resource?> =
+            ArrayList(listOf(0, 2, 2, 3, 4, 6, 7).map { x -> Resource(x, emptyList()) })
+
+        for (load in mockLoads) {
+            actual.add(strategy.findSuitableResource(load, mockResources))
+        }
+
+        assertEquals(expected, actual)
+    }
+}
diff --git a/theodolite/src/test/kotlin/theodolite/ResourceLimitPatcherTest.kt b/theodolite/src/test/kotlin/theodolite/ResourceLimitPatcherTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..46758583172c3fcd6417e17ff5bab85f8659734b
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/ResourceLimitPatcherTest.kt
@@ -0,0 +1,89 @@
+package theodolite
+
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.client.DefaultKubernetesClient
+import io.quarkus.test.junit.QuarkusTest
+import io.smallrye.common.constraint.Assert.assertTrue
+import org.junit.jupiter.api.Test
+import theodolite.k8s.resourceLoader.K8sResourceLoaderFromFile
+import theodolite.patcher.PatcherFactory
+import theodolite.util.PatcherDefinition
+
+/**
+ * Resource patcher test
+ *
+ * This class tests four scenarios for the ResourceLimitPatcher and the ResourceRequestPatcher.
+ * The different test cases specify four possible situations:
+ * Case 1:  In the given YAML declaration, both memory and CPU are defined
+ * Case 2:  In the given YAML declaration, only CPU is defined
+ * Case 3:  In the given YAML declaration, only memory is defined
+ * Case 4:  In the given YAML declaration, neither resource requests nor resource limits are defined
+ */
+@QuarkusTest
+class ResourceLimitPatcherTest {
+    val testPath = "./src/test/resources/"
+    val loader = K8sResourceLoaderFromFile(DefaultKubernetesClient().inNamespace(""))
+    val patcherFactory = PatcherFactory()
+
+    fun applyTest(fileName: String) {
+        val cpuValue = "50m"
+        val memValue = "3Gi"
+        val k8sResource = loader.loadK8sResource("Deployment", testPath + fileName) as Deployment
+
+        val defCPU = PatcherDefinition()
+        defCPU.resource = "cpu-memory-deployment.yaml"
+        defCPU.type = "ResourceLimitPatcher"
+        defCPU.properties = mutableMapOf(
+            "limitedResource" to "cpu",
+            "container" to "application"
+        )
+
+        val defMEM = PatcherDefinition()
+        defMEM.resource = "cpu-memory-deployment.yaml"
+        defMEM.type = "ResourceLimitPatcher"
+        defMEM.properties = mutableMapOf(
+            "limitedResource" to "memory",
+            "container" to "uc-application"
+        )
+
+        patcherFactory.createPatcher(
+            patcherDefinition = defCPU,
+            k8sResources = listOf(Pair("cpu-memory-deployment.yaml", k8sResource))
+        ).patch(value = cpuValue)
+
+        patcherFactory.createPatcher(
+            patcherDefinition = defMEM,
+            k8sResources = listOf(Pair("cpu-memory-deployment.yaml", k8sResource))
+        ).patch(value = memValue)
+
+        k8sResource.spec.template.spec.containers.filter { it.name == defCPU.properties["container"]!! }
+            .forEach {
+                assertTrue(it.resources.limits["cpu"].toString() == cpuValue)
+                assertTrue(it.resources.limits["memory"].toString() == memValue)
+            }
+    }
+
+    @Test
+    fun testWithExistingCpuAndMemoryDeclarations() {
+        // Case 1: In the given YAML declaration memory and cpu are defined
+        applyTest("cpu-memory-deployment.yaml")
+    }
+
+    @Test
+    fun testOnlyWithExistingCpuDeclarations() {
+        // Case 2:  In the given YAML declaration only cpu is defined
+        applyTest("cpu-deployment.yaml")
+    }
+
+    @Test
+    fun testOnlyWithExistingMemoryDeclarations() {
+        //  Case 3:  In the given YAML declaration only memory is defined
+        applyTest("memory-deployment.yaml")
+    }
+
+    @Test
+    fun testWithoutResourceDeclarations() {
+        // Case 4: In the given YAML declaration neither `Resource Request` nor `Request Limit` is defined
+        applyTest("no-resources-deployment.yaml")
+    }
+}
diff --git a/theodolite/src/test/kotlin/theodolite/ResourceRequestPatcherTest.kt b/theodolite/src/test/kotlin/theodolite/ResourceRequestPatcherTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..8794d4dc2d67b8af78f4fa409c727f882922d0b8
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/ResourceRequestPatcherTest.kt
@@ -0,0 +1,88 @@
+package theodolite
+
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.client.DefaultKubernetesClient
+import io.quarkus.test.junit.QuarkusTest
+import io.smallrye.common.constraint.Assert.assertTrue
+import org.junit.jupiter.api.Test
+import theodolite.k8s.resourceLoader.K8sResourceLoaderFromFile
+import theodolite.patcher.PatcherFactory
+import theodolite.util.PatcherDefinition
+
+/**
+ * Resource patcher test
+ *
+ * This class tests four scenarios for the ResourceLimitPatcher and the ResourceRequestPatcher.
+ * The different test cases specify four possible situations:
+ * Case 1:  In the given YAML declaration, both memory and CPU are defined
+ * Case 2:  In the given YAML declaration, only CPU is defined
+ * Case 3:  In the given YAML declaration, only memory is defined
+ * Case 4:  In the given YAML declaration, neither resource requests nor resource limits are defined
+ */
+@QuarkusTest
+class ResourceRequestPatcherTest {
+    val testPath = "./src/test/resources/"
+    val loader = K8sResourceLoaderFromFile(DefaultKubernetesClient().inNamespace(""))
+    val patcherFactory = PatcherFactory()
+
+    fun applyTest(fileName: String) {
+        val cpuValue = "50m"
+        val memValue = "3Gi"
+        val k8sResource = loader.loadK8sResource("Deployment", testPath + fileName) as Deployment
+
+        val defCPU = PatcherDefinition()
+        defCPU.resource = "cpu-memory-deployment.yaml"
+        defCPU.type = "ResourceRequestPatcher"
+        defCPU.properties = mutableMapOf(
+            "requestedResource" to "cpu",
+            "container" to "application"
+        )
+
+        val defMEM = PatcherDefinition()
+        defMEM.resource = "cpu-memory-deployment.yaml"
+        defMEM.type = "ResourceRequestPatcher"
+        defMEM.properties = mutableMapOf(
+            "requestedResource" to "memory",
+            "container" to "application"
+        )
+
+        patcherFactory.createPatcher(
+            patcherDefinition = defCPU,
+            k8sResources = listOf(Pair("cpu-memory-deployment.yaml", k8sResource))
+        ).patch(value = cpuValue)
+        patcherFactory.createPatcher(
+            patcherDefinition = defMEM,
+            k8sResources = listOf(Pair("cpu-memory-deployment.yaml", k8sResource))
+        ).patch(value = memValue)
+
+        k8sResource.spec.template.spec.containers.filter { it.name == defCPU.properties["container"]!! }
+            .forEach {
+                assertTrue(it.resources.requests["cpu"].toString() == cpuValue)
+                assertTrue(it.resources.requests["memory"].toString() == memValue)
+            }
+    }
+
+    @Test
+    fun testWithExistingCpuAndMemoryDeclarations() {
+        // Case 1: In the given YAML declaration memory and cpu are defined
+        applyTest("cpu-memory-deployment.yaml")
+    }
+
+    @Test
+    fun testOnlyWithExistingCpuDeclarations() {
+        // Case 2:  In the given YAML declaration only cpu is defined
+        applyTest("cpu-deployment.yaml")
+    }
+
+    @Test
+    fun testOnlyWithExistingMemoryDeclarations() {
+        //  Case 3:  In the given YAML declaration only memory is defined
+        applyTest("memory-deployment.yaml")
+    }
+
+    @Test
+    fun testWithoutResourceDeclarations() {
+        // Case 4: In the given YAML declaration neither `Resource Request` nor `Request Limit` is defined
+        applyTest("no-resources-deployment.yaml")
+    }
+}
diff --git a/theodolite/src/test/kotlin/theodolite/TestBenchmark.kt b/theodolite/src/test/kotlin/theodolite/TestBenchmark.kt
new file mode 100644
index 0000000000000000000000000000000000000000..b08c1a18a3013e1573e4892f01698b5e509f9609
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/TestBenchmark.kt
@@ -0,0 +1,26 @@
+package theodolite
+
+import theodolite.benchmark.Benchmark
+import theodolite.benchmark.BenchmarkDeployment
+import theodolite.util.ConfigurationOverride
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+class TestBenchmark : Benchmark {
+
+    override fun setupInfrastructure() {
+    }
+
+    override fun teardownInfrastructure() {
+    }
+
+    override fun buildDeployment(
+        load: LoadDimension,
+        res: Resource,
+        configurationOverrides: List<ConfigurationOverride?>,
+        loadGenerationDelay: Long,
+        afterTeardownDelay: Long
+    ): BenchmarkDeployment {
+        return TestBenchmarkDeployment()
+    }
+}
diff --git a/theodolite/src/test/kotlin/theodolite/TestBenchmarkDeployment.kt b/theodolite/src/test/kotlin/theodolite/TestBenchmarkDeployment.kt
new file mode 100644
index 0000000000000000000000000000000000000000..68b08c294128368ee1b65549aa85c877bd4bf313
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/TestBenchmarkDeployment.kt
@@ -0,0 +1,9 @@
+package theodolite
+
+import theodolite.benchmark.BenchmarkDeployment
+
+class TestBenchmarkDeployment : BenchmarkDeployment {
+    override fun setup() {}
+
+    override fun teardown() {}
+}
diff --git a/theodolite/src/test/kotlin/theodolite/TestBenchmarkExecutorImpl.kt b/theodolite/src/test/kotlin/theodolite/TestBenchmarkExecutorImpl.kt
new file mode 100644
index 0000000000000000000000000000000000000000..2efddc48cb93a0870d1716c58a7018145c16e2ff
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/TestBenchmarkExecutorImpl.kt
@@ -0,0 +1,38 @@
+package theodolite
+
+import theodolite.benchmark.Benchmark
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.execution.BenchmarkExecutor
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import theodolite.util.Results
+import java.time.Duration
+
+class TestBenchmarkExecutorImpl(
+    private val mockResults: Array<Array<Boolean>>,
+    benchmark: Benchmark,
+    results: Results,
+    slo: List<BenchmarkExecution.Slo>,
+    executionId: Int,
+    loadGenerationDelay: Long,
+    afterTeardownDelay: Long
+) :
+    BenchmarkExecutor(
+        benchmark,
+        results,
+        executionDuration = Duration.ofSeconds(1),
+        configurationOverrides = emptyList(),
+        slos = slo,
+        repetitions = 1,
+        executionId = executionId,
+        loadGenerationDelay = loadGenerationDelay,
+        afterTeardownDelay = afterTeardownDelay,
+        executionName = "test-execution"
+    ) {
+
+    override fun runExperiment(load: LoadDimension, res: Resource): Boolean {
+        val result = this.mockResults[load.get()][res.get()]
+        this.results.setResult(Pair(load, res), result)
+        return result
+    }
+}
diff --git a/theodolite/src/test/kotlin/theodolite/benchmark/ConfigMapResourceSetTest.kt b/theodolite/src/test/kotlin/theodolite/benchmark/ConfigMapResourceSetTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..2cc8f931418e28ae8841b592f93df8d88440cf3c
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/benchmark/ConfigMapResourceSetTest.kt
@@ -0,0 +1,226 @@
+package theodolite.benchmark
+
+import com.google.gson.Gson
+import io.fabric8.kubernetes.api.model.*
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+import io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer
+import io.quarkus.test.junit.QuarkusTest
+import io.smallrye.common.constraint.Assert.assertTrue
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.AfterEach
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.Test
+import theodolite.k8s.CustomResourceWrapper
+import theodolite.k8s.resourceLoader.K8sResourceLoaderFromFile
+import theodolite.util.DeploymentFailedException
+
+private val testResourcePath = "./src/test/resources/k8s-resource-files/"
+
+@QuarkusTest
+class ConfigMapResourceSetTest {
+    private val server = KubernetesServer(false, true)
+
+    @BeforeEach
+    fun setUp() {
+        server.before()
+    }
+
+    @AfterEach
+    fun tearDown() {
+        server.after()
+    }
+
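+    // Helper: stores the serialized resource in a ConfigMap on the mock server and
+    // then reads it back through a ConfigMapResourceSet.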
+    fun deployAndGetResource(resource: String): Collection<Pair<String, KubernetesResource>> {
+        val configMap1 = ConfigMapBuilder()
+            .withNewMetadata().withName("test-configmap").endMetadata()
+            .addToData("test-resource.yaml",resource)
+            .build()
+
+        server.client.configMaps().createOrReplace(configMap1)
+
+        val resourceSet = ConfigMapResourceSet()
+        resourceSet.name = "test-configmap"
+
+        return resourceSet.getResourceSet(server.client)
+    }
+
+
+    @Test
+    fun testLoadDeployment() {
+        val resourceBuilder = DeploymentBuilder()
+        resourceBuilder.withNewSpec().endSpec()
+        resourceBuilder.withNewMetadata().endMetadata()
+        val resource = resourceBuilder.build()
+        resource.metadata.name = "test-deployment"
+
+        val createdResource = deployAndGetResource(resource = Gson().toJson(resource))
+        assertEquals(1, createdResource.size)
+        assertTrue(createdResource.toMutableSet().first().second is Deployment)
+        assertTrue(createdResource.toMutableSet().first().second.toString().contains(other = resource.metadata.name))
+    }
+
+    @Test
+    fun testLoadStatefulSet() {
+        val resourceBuilder = StatefulSetBuilder()
+        resourceBuilder.withNewSpec().endSpec()
+        resourceBuilder.withNewMetadata().endMetadata()
+        val resource = resourceBuilder.build()
+        resource.metadata.name = "test-resource"
+
+        val createdResource = deployAndGetResource(resource = Gson().toJson(resource))
+        assertEquals(1, createdResource.size)
+        assertTrue(createdResource.toMutableSet().first().second is StatefulSet)
+        assertTrue(createdResource.toMutableSet().first().second.toString().contains(other = resource.metadata.name))
+    }
+
+    @Test
+    fun testLoadService() {
+        val resourceBuilder = ServiceBuilder()
+        resourceBuilder.withNewSpec().endSpec()
+        resourceBuilder.withNewMetadata().endMetadata()
+        val resource = resourceBuilder.build()
+        resource.metadata.name = "test-resource"
+
+        val createdResource = deployAndGetResource(resource = Gson().toJson(resource))
+        assertEquals(1, createdResource.size)
+        assertTrue(createdResource.toMutableSet().first().second is Service)
+        assertTrue(createdResource.toMutableSet().first().second.toString().contains(other = resource.metadata.name))
+    }
+
+    @Test
+    fun testLoadConfigMap() {
+        val resourceBuilder = ConfigMapBuilder()
+        resourceBuilder.withNewMetadata().endMetadata()
+        val resource = resourceBuilder.build()
+        resource.metadata.name = "test-resource"
+
+        val createdResource = deployAndGetResource(resource = Gson().toJson(resource))
+        assertEquals(1, createdResource.size)
+        assertTrue(createdResource.toMutableSet().first().second is ConfigMap)
+        assertTrue(createdResource.toMutableSet().first().second.toString().contains(other = resource.metadata.name))
+    }
+
+    @Test
+    fun testLoadExecution() {
+        val loader = K8sResourceLoaderFromFile(server.client)
+        val resource = loader.loadK8sResource("Execution", testResourcePath + "test-execution.yaml") as CustomResourceWrapper
+        val createdResource = deployAndGetResource(resource = Gson().toJson(resource.crAsMap))
+
+        assertEquals(1, createdResource.size)
+        assertTrue(createdResource.toMutableSet().first().second is CustomResourceWrapper)
+
+        val loadedResource = createdResource.toMutableSet().first().second
+        if (loadedResource is CustomResourceWrapper){
+            assertTrue(loadedResource.getName() == "example-execution")
+        }
+    }
+
+    @Test
+    fun testLoadBenchmark() {
+        val loader = K8sResourceLoaderFromFile(server.client)
+        val resource = loader.loadK8sResource("Benchmark", testResourcePath + "test-benchmark.yaml") as CustomResourceWrapper
+        val createdResource = deployAndGetResource(resource = Gson().toJson(resource.crAsMap))
+
+        assertEquals(1, createdResource.size)
+        assertTrue(createdResource.toMutableSet().first().second is CustomResourceWrapper)
+
+        val loadedResource = createdResource.toMutableSet().first().second
+        if (loadedResource is CustomResourceWrapper){
+            assertTrue(loadedResource.getName() == "example-benchmark")
+        }
+    }
+
+    @Test
+    fun testLoadServiceMonitor() {
+        val loader = K8sResourceLoaderFromFile(server.client)
+        val resource = loader.loadK8sResource("ServiceMonitor", testResourcePath + "test-service-monitor.yaml") as CustomResourceWrapper
+        val createdResource = deployAndGetResource(resource = Gson().toJson(resource.crAsMap))
+
+        assertEquals(1, createdResource.size)
+        assertTrue(createdResource.toMutableSet().first().second is CustomResourceWrapper)
+
+        val loadedResource = createdResource.toMutableSet().first().second
+        if (loadedResource is CustomResourceWrapper){
+            assertTrue(loadedResource.getName() == "test-service-monitor")
+        }
+    }
+
+    @Test
+    fun testMultipleFiles() {
+        val resourceBuilder = DeploymentBuilder()
+        resourceBuilder.withNewSpec().endSpec()
+        resourceBuilder.withNewMetadata().endMetadata()
+        val resource = resourceBuilder.build()
+        resource.metadata.name = "test-deployment"
+
+        val resourceBuilder1 = ConfigMapBuilder()
+        resourceBuilder1.withNewMetadata().endMetadata()
+        val resource1 = resourceBuilder1.build()
+        resource1.metadata.name = "test-configmap"
+
+        val configMap1 = ConfigMapBuilder()
+            .withNewMetadata().withName("test-configmap").endMetadata()
+            .addToData("test-deployment.yaml",Gson().toJson(resource))
+            .addToData("test-configmap.yaml",Gson().toJson(resource1))
+            .build()
+
+        server.client.configMaps().createOrReplace(configMap1)
+
+        val resourceSet = ConfigMapResourceSet()
+        resourceSet.name = "test-configmap"
+
+        val createdResourcesSet = resourceSet.getResourceSet(server.client)
+
+        assertEquals(2, createdResourcesSet.size)
+        assert(createdResourcesSet.toMutableList()[0].second is Deployment)
+        assert(createdResourcesSet.toMutableList()[1].second is ConfigMap)
+    }
+
+    @Test
+    fun testFileIsSet() {
+        val resourceBuilder = DeploymentBuilder()
+        resourceBuilder.withNewSpec().endSpec()
+        resourceBuilder.withNewMetadata().endMetadata()
+        val resource = resourceBuilder.build()
+        resource.metadata.name = "test-deployment"
+
+        val resourceBuilder1 = ConfigMapBuilder()
+        resourceBuilder1.withNewMetadata().endMetadata()
+        val resource1 = resourceBuilder1.build()
+        resource1.metadata.name = "test-configmap"
+
+        val configMap1 = ConfigMapBuilder()
+            .withNewMetadata().withName("test-configmap").endMetadata()
+            .addToData("test-deployment.yaml",Gson().toJson(resource))
+            .addToData("test-configmap.yaml",Gson().toJson(resource1))
+            .build()
+
+        server.client.configMaps().createOrReplace(configMap1)
+
+        val resourceSet = ConfigMapResourceSet()
+        resourceSet.name = "test-configmap"
+        resourceSet.files = listOf("test-deployment.yaml")
+
+        val createdResourcesSet = resourceSet.getResourceSet(server.client)
+
+        assertEquals(1, createdResourcesSet.size)
+        assert(createdResourcesSet.toMutableSet().first().second is Deployment)
+    }
+
+
+    @Test
+    fun testConfigMapNotExist() {
+        val resourceSet = ConfigMapResourceSet()
+        resourceSet.name = "test-configmap1"
+        lateinit var ex: Exception
+        try {
+            resourceSet.getResourceSet(server.client)
+        } catch (e: Exception) {
+            ex = e
+        }
+        assertTrue(ex is DeploymentFailedException)
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/test/kotlin/theodolite/benchmark/FileSystemResourceSetTest.kt b/theodolite/src/test/kotlin/theodolite/benchmark/FileSystemResourceSetTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..59ad2be3248f67442ce352788f8b94b26f3b6b90
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/benchmark/FileSystemResourceSetTest.kt
@@ -0,0 +1,115 @@
+package theodolite.benchmark
+
+import io.fabric8.kubernetes.api.model.ConfigMap
+import io.fabric8.kubernetes.api.model.Service
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer
+import io.smallrye.common.constraint.Assert.assertTrue
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.AfterEach
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.Test
+import theodolite.k8s.CustomResourceWrapper
+import theodolite.util.DeploymentFailedException
+import java.lang.IllegalStateException
+
+private val testResourcePath = "./src/test/resources/k8s-resource-files/"
+
+class FileSystemResourceSetTest {
+
+    private val server = KubernetesServer(false, true)
+
+    @BeforeEach
+    fun setUp() {
+        server.before()
+    }
+
+    @AfterEach
+    fun tearDown() {
+        server.after()
+    }
+
+    @Test
+    fun testLoadDeployment() {
+        val resourceSet = FileSystemResourceSet()
+        resourceSet.path = testResourcePath
+        resourceSet.files = listOf("test-deployment.yaml")
+        assertEquals(1, resourceSet.getResourceSet(server.client).size)
+        assertTrue(resourceSet.getResourceSet(server.client).toMutableSet().first().second is Deployment)
+    }
+
+    @Test
+    fun testLoadService() {
+        val resourceSet = FileSystemResourceSet()
+        resourceSet.path = testResourcePath
+        resourceSet.files = listOf("test-service.yaml")
+        assertEquals(1, resourceSet.getResourceSet(server.client).size)
+        assertTrue(resourceSet.getResourceSet(server.client).toMutableSet().first().second is Service)
+    }
+
+    @Test
+    fun testLoadStatefulSet() {
+        val resourceSet = FileSystemResourceSet()
+        resourceSet.path = testResourcePath
+        resourceSet.files = listOf("test-statefulset.yaml")
+        assertEquals(1, resourceSet.getResourceSet(server.client).size)
+        assertTrue(resourceSet.getResourceSet(server.client).toMutableSet().first().second is StatefulSet)
+    }
+
+    @Test
+    fun testLoadConfigMap() {
+        val resourceSet = FileSystemResourceSet()
+        resourceSet.path = testResourcePath
+        resourceSet.files = listOf("test-configmap.yaml")
+        assertEquals(1, resourceSet.getResourceSet(server.client).size)
+        assertTrue(resourceSet.getResourceSet(server.client).toMutableSet().first().second is ConfigMap)
+    }
+
+    @Test
+    fun testLoadServiceMonitor() {
+        val resourceSet = FileSystemResourceSet()
+        resourceSet.path = testResourcePath
+        resourceSet.files = listOf("test-service-monitor.yaml")
+        assertEquals(1, resourceSet.getResourceSet(server.client).size)
+        assertTrue(resourceSet.getResourceSet(server.client).toMutableSet().first().second is CustomResourceWrapper)
+    }
+
+    @Test
+    fun testLoadBenchmark() {
+        val resourceSet = FileSystemResourceSet()
+        resourceSet.path = testResourcePath
+        resourceSet.files = listOf("test-benchmark.yaml")
+        assertEquals(1, resourceSet.getResourceSet(server.client).size)
+        assertTrue(resourceSet.getResourceSet(server.client).toMutableSet().first().second is CustomResourceWrapper)
+    }
+
+    @Test
+    fun testLoadExecution() {
+        val resourceSet = FileSystemResourceSet()
+        resourceSet.path = testResourcePath
+        resourceSet.files = listOf("test-execution.yaml")
+        assertEquals(1, resourceSet.getResourceSet(server.client).size)
+        assertTrue(resourceSet.getResourceSet(server.client).toMutableSet().first().second is CustomResourceWrapper)
+    }
+
+    @Test
+    fun testFilesNotSet() {
+        val resourceSet = FileSystemResourceSet()
+        resourceSet.path = testResourcePath
+        assertEquals(9, resourceSet.getResourceSet(server.client).size)
+    }
+
+    @Test
+    fun testWrongPath() {
+        val resourceSet = FileSystemResourceSet()
+        resourceSet.path = "/abc/not-exist"
+        lateinit var ex: Exception
+        try {
+            resourceSet.getResourceSet(server.client)
+        } catch (e: Exception) {
+            ex = e
+        }
+        assertTrue(ex is DeploymentFailedException)
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/test/kotlin/theodolite/execution/operator/BenchmarkCRDummy.kt b/theodolite/src/test/kotlin/theodolite/execution/operator/BenchmarkCRDummy.kt
new file mode 100644
index 0000000000000000000000000000000000000000..e294ea539ea60104cc00e9f73de790302ad52670
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/execution/operator/BenchmarkCRDummy.kt
@@ -0,0 +1,42 @@
+package theodolite.execution.operator
+
+import theodolite.benchmark.KubernetesBenchmark
+import theodolite.benchmark.Resources
+import theodolite.model.crd.BenchmarkCRD
+import theodolite.util.KafkaConfig
+
+class BenchmarkCRDummy(name: String) {
+
+    private val benchmark = KubernetesBenchmark()
+    private val benchmarkCR = BenchmarkCRD(benchmark)
+
+    fun getCR(): BenchmarkCRD {
+        return benchmarkCR
+    }
+
+    init {
+        val kafkaConfig = KafkaConfig()
+
+        kafkaConfig.bootstrapServer = ""
+        kafkaConfig.topics = emptyList()
+
+        benchmarkCR.spec = benchmark
+        benchmarkCR.metadata.name = name
+        benchmarkCR.kind = "Benchmark"
+        benchmarkCR.apiVersion = "v1"
+
+        benchmark.infrastructure = Resources()
+        benchmark.sut = Resources()
+        benchmark.loadGenerator = Resources()
+
+        benchmark.infrastructure.resources = emptyList()
+        benchmark.sut.resources = emptyList()
+        benchmark.loadGenerator.resources = emptyList()
+
+        benchmark.resourceTypes = emptyList()
+        benchmark.loadTypes = emptyList()
+        benchmark.kafkaConfig = kafkaConfig
+        benchmark.name = benchmarkCR.metadata.name
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/test/kotlin/theodolite/execution/operator/ControllerTest.kt b/theodolite/src/test/kotlin/theodolite/execution/operator/ControllerTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..7e0532aff36cac2fb1a1c718415315b8f54052c2
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/execution/operator/ControllerTest.kt
@@ -0,0 +1,140 @@
+package theodolite.execution.operator
+
+import com.google.gson.Gson
+import com.google.gson.GsonBuilder
+import io.fabric8.kubernetes.client.CustomResourceList
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer
+import io.quarkus.test.junit.QuarkusTest
+import org.junit.jupiter.api.AfterEach
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.DisplayName
+import org.junit.jupiter.api.Test
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.benchmark.KubernetesBenchmark
+import theodolite.model.crd.BenchmarkCRD
+import theodolite.model.crd.BenchmarkStates
+import theodolite.model.crd.ExecutionCRD
+
+@QuarkusTest
+class ControllerTest {
+    private final val server = KubernetesServer(false, false)
+    lateinit var controller: TheodoliteController
+    private val gson: Gson = GsonBuilder().enableComplexMapKeySerialization().create()
+
+    private var benchmark = KubernetesBenchmark()
+    private var execution = BenchmarkExecution()
+
+    private val benchmarkResourceList = CustomResourceList<BenchmarkCRD>()
+    private val executionResourceList = CustomResourceList<ExecutionCRD>()
+
+
+    @BeforeEach
+    fun setUp() {
+        server.before()
+        this.controller = TheodoliteOperator().getController(
+            client = server.client,
+            executionStateHandler = ExecutionStateHandler(server.client),
+            benchmarkStateHandler = BenchmarkStateHandler(server.client)
+        )
+
+        // benchmark
+        val benchmark1 = BenchmarkCRDummy(name = "Test-Benchmark")
+        benchmark1.getCR().status.resourceSetsState = BenchmarkStates.READY.value
+        val benchmark2 = BenchmarkCRDummy(name = "Test-Benchmark-123")
+        benchmarkResourceList.items = listOf(benchmark1.getCR(), benchmark2.getCR())
+
+        // execution
+        val execution1 = ExecutionCRDummy(name = "matching-execution", benchmark = "Test-Benchmark")
+        val execution2 = ExecutionCRDummy(name = "non-matching-execution", benchmark = "Test-Benchmark-456")
+        val execution3 = ExecutionCRDummy(name = "second-matching-execution", benchmark = "Test-Benchmark")
+        executionResourceList.items = listOf(execution1.getCR(), execution2.getCR(), execution3.getCR())
+
+        this.benchmark = benchmark1.getCR().spec
+        this.execution = execution1.getCR().spec
+
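+        // Canned responses: the namespaced CRD endpoints return the benchmark and
+        // execution lists assembled above.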
+        server
+            .expect()
+            .get()
+            .withPath("/apis/theodolite.com/v1/namespaces/test/benchmarks")
+            .andReturn(200, benchmarkResourceList)
+            .always()
+
+        server
+            .expect()
+            .get()
+            .withPath("/apis/theodolite.com/v1/namespaces/test/executions")
+            .andReturn(200, executionResourceList)
+            .always()
+    }
+
+    @AfterEach
+    fun tearDown() {
+        server.after()
+    }
+
+    @Test
+    @DisplayName("Check namespaced property of benchmarkCRDClient")
+    fun testBenchmarkClientNamespaced() {
+        val method = controller
+            .javaClass
+            .getDeclaredMethod("getBenchmarks")
+        method.isAccessible = true
+        method.invoke(controller)
+
+        assert(
+            server
+                .lastRequest
+                .toString()
+                .contains("namespaces")
+        )
+    }
+
+    @Test
+    @DisplayName("Check namespaced property of executionCRDClient")
+    fun testExecutionClientNamespaced() {
+        val method = controller
+            .javaClass
+            .getDeclaredMethod("getNextExecution")
+        method.isAccessible = true
+        method.invoke(controller)
+
+        assert(
+            server
+                .lastRequest
+                .toString()
+                .contains("namespaces")
+        )
+    }
+
+    @Test
+    fun getBenchmarksTest() {
+        val method = controller
+            .javaClass
+            .getDeclaredMethod("getBenchmarks")
+        method.isAccessible = true
+
+        val result = method.invoke(controller) as List<BenchmarkCRD>
+
+        assertEquals(2, result.size)
+        assertEquals(
+            gson.toJson(benchmark),
+            gson.toJson(result.firstOrNull()?.spec)
+        )
+    }
+
+    @Test
+    fun getNextExecution() {
+        val method = controller
+            .javaClass
+            .getDeclaredMethod("getNextExecution")
+        method.isAccessible = true
+
+        val result = method.invoke(controller) as BenchmarkExecution?
+
+        assertEquals(
+            gson.toJson(this.execution),
+            gson.toJson(result)
+        )
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/test/kotlin/theodolite/execution/operator/ExecutionCRDummy.kt b/theodolite/src/test/kotlin/theodolite/execution/operator/ExecutionCRDummy.kt
new file mode 100644
index 0000000000000000000000000000000000000000..51347d41b396bf375c14d5580b0f2619ce5b518c
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/execution/operator/ExecutionCRDummy.kt
@@ -0,0 +1,56 @@
+package theodolite.execution.operator
+
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.model.crd.ExecutionCRD
+import theodolite.model.crd.ExecutionStatus
+import theodolite.model.crd.ExecutionStates
+
+class ExecutionCRDummy(name: String, benchmark: String) {
+
+    private val execution = BenchmarkExecution()
+    private val executionState = ExecutionStatus()
+    private val executionCR = ExecutionCRD(execution, executionState)
+
+    fun getCR(): ExecutionCRD {
+        return this.executionCR
+    }
+
+    fun getStatus() : ExecutionStatus {
+        return this.executionState
+    }
+
+    init {
+        // configure metadata
+        executionCR.spec = execution
+        executionCR.metadata.name = name
+        executionCR.kind = "Execution"
+        executionCR.apiVersion = "v1"
+
+        // configure execution
+        val loadType = BenchmarkExecution.LoadDefinition()
+        loadType.loadType = ""
+        loadType.loadValues = emptyList()
+
+        val resourceDef = BenchmarkExecution.ResourceDefinition()
+        resourceDef.resourceType = ""
+        resourceDef.resourceValues = emptyList()
+
+        val exec = BenchmarkExecution.Execution()
+        exec.afterTeardownDelay = 0
+        exec.duration = 0
+        exec.loadGenerationDelay = 0
+        exec.repetitions = 1
+        exec.restrictions = emptyList()
+        exec.strategy = ""
+
+        execution.benchmark = benchmark
+        execution.load = loadType
+        execution.resources = resourceDef
+        execution.slos = emptyList()
+        execution.execution = exec
+        execution.configOverrides = mutableListOf()
+        execution.name = executionCR.metadata.name
+
+        executionState.executionState = ExecutionStates.PENDING.value
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/test/kotlin/theodolite/execution/operator/ExecutionEventHandlerTest.kt b/theodolite/src/test/kotlin/theodolite/execution/operator/ExecutionEventHandlerTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..d8db7ab3b64ce3856984ddbc279ef148aa325e73
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/execution/operator/ExecutionEventHandlerTest.kt
@@ -0,0 +1,227 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.client.informers.SharedInformerFactory
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer
+import io.quarkus.test.junit.QuarkusTest
+import org.junit.jupiter.api.AfterEach
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.DisplayName
+import org.junit.jupiter.api.Test
+import theodolite.k8s.K8sManager
+import theodolite.k8s.resourceLoader.K8sResourceLoaderFromFile
+import theodolite.model.crd.ExecutionStates
+import java.lang.Thread.sleep
+
+
+private const val RESYNC_PERIOD = 1000 * 1000.toLong()
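+// Expected state transitions, as asserted by the tests below: an execution without
+// a state becomes PENDING on add; RUNNING becomes RESTART on add or update;
+// FINISHED and FAILURE become PENDING again on update; RESTART stays RESTART.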
+
+
+@QuarkusTest
+class ExecutionEventHandlerTest {
+    private final val server = KubernetesServer(false, true)
+    private val testResourcePath = "./src/test/resources/k8s-resource-files/"
+    private final val executionName = "example-execution"
+    lateinit var factory: SharedInformerFactory
+    lateinit var executionVersion1: KubernetesResource
+    lateinit var executionVersion2: KubernetesResource
+    lateinit var stateHandler: ExecutionStateHandler
+    lateinit var manager: K8sManager
+    lateinit var controller: TheodoliteController
+
+    @BeforeEach
+    fun setUp() {
+        server.before()
+        val operator = TheodoliteOperator()
+        this.controller = operator.getController(
+            client = server.client,
+            executionStateHandler = ExecutionStateHandler(client = server.client),
+            benchmarkStateHandler = BenchmarkStateHandler(client = server.client)
+        )
+
+        this.factory = operator.getExecutionEventHandler(this.controller, server.client)
+
+        this.executionVersion1 = K8sResourceLoaderFromFile(server.client)
+            .loadK8sResource("Execution", testResourcePath + "test-execution.yaml")
+
+        this.executionVersion2 = K8sResourceLoaderFromFile(server.client)
+            .loadK8sResource("Execution", testResourcePath + "test-execution-update.yaml")
+
+        this.stateHandler = operator.getExecutionStateHandler(server.client)
+
+        this.manager = K8sManager((server.client))
+    }
+
+    @AfterEach
+    fun tearDown() {
+        server.after()
+        factory.stopAllRegisteredInformers()
+    }
+
+    @Test
+    @DisplayName("Check namespaced property of informers")
+    fun testNamespaced() {
+        manager.deploy(executionVersion1)
+        factory.startAllRegisteredInformers()
+        server.lastRequest
+        // Skip the first request; the second request is the first `GET` request and must be namespaced
+        assert(
+            server
+                .lastRequest
+                .toString()
+                .contains("namespaces")
+        )
+    }
+
+    @Test
+    @DisplayName("Test onAdd method for executions without execution state")
+    fun testWithoutState() {
+        manager.deploy(executionVersion1)
+        factory.startAllRegisteredInformers()
+        sleep(500)
+        assertEquals(
+            ExecutionStates.PENDING,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+    }
+
+    @Test
+    @DisplayName("Test onAdd method for executions with execution state `RUNNING`")
+    fun testWithStateIsRunning() {
+        manager.deploy(executionVersion1)
+        stateHandler
+            .setExecutionState(
+                resourceName = executionName,
+                status = ExecutionStates.RUNNING
+            )
+        factory.startAllRegisteredInformers()
+        sleep(500)
+        assertEquals(
+            ExecutionStates.RESTART,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+    }
+
+    @Test
+    @DisplayName("Test onUpdate method for execution with execution state `PENDING`")
+    fun testOnUpdatePending() {
+        manager.deploy(executionVersion1)
+
+        factory.startAllRegisteredInformers()
+        sleep(500)
+
+        assertEquals(
+            ExecutionStates.PENDING,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+
+        manager.deploy(executionVersion2)
+        assertEquals(
+            ExecutionStates.PENDING,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+    }
+
+    @Test
+    @DisplayName("Test onUpdate method for execution with execution state `FINISHED`")
+    fun testOnUpdateFinished() {
+        manager.deploy(executionVersion1)
+        factory.startAllRegisteredInformers()
+        sleep(500)
+
+        stateHandler.setExecutionState(
+            resourceName = executionName,
+            status = ExecutionStates.FINISHED
+        )
+
+        manager.deploy(executionVersion2)
+        sleep(500)
+
+        assertEquals(
+            ExecutionStates.PENDING,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+    }
+
+    @Test
+    @DisplayName("Test onUpdate method for execution with execution state `FAILURE`")
+    fun testOnUpdateFailure() {
+        manager.deploy(executionVersion1)
+        factory.startAllRegisteredInformers()
+        sleep(500)
+
+        stateHandler.setExecutionState(
+            resourceName = executionName,
+            status = ExecutionStates.FAILURE
+        )
+
+        manager.deploy(executionVersion2)
+        sleep(500)
+
+        assertEquals(
+            ExecutionStates.PENDING,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+    }
+
+
+    @Test
+    @DisplayName("Test onUpdate method for execution with execution state `RUNNING`")
+    fun testOnUpdateRunning() {
+        manager.deploy(executionVersion1)
+        factory.startAllRegisteredInformers()
+        sleep(500)
+
+        stateHandler.setExecutionState(
+            resourceName = executionName,
+            status = ExecutionStates.RUNNING
+        )
+
+        manager.deploy(executionVersion2)
+        sleep(500)
+
+        assertEquals(
+            ExecutionStates.RESTART,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+    }
+
+    @Test
+    @DisplayName("Test onUpdate method for execution with execution state `RESTART`")
+    fun testOnUpdateRestart() {
+        manager.deploy(executionVersion1)
+        factory.startAllRegisteredInformers()
+        sleep(500)
+
+        stateHandler.setExecutionState(
+            resourceName = executionName,
+            status = ExecutionStates.RESTART
+        )
+
+        manager.deploy(executionVersion2)
+        sleep(500)
+
+        assertEquals(
+            ExecutionStates.RESTART,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/test/kotlin/theodolite/execution/operator/StateHandlerTest.kt b/theodolite/src/test/kotlin/theodolite/execution/operator/StateHandlerTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..a54f4ed6db559f8f7f15ae82deecf3fedf8b4abe
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/execution/operator/StateHandlerTest.kt
@@ -0,0 +1,77 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer
+import org.junit.jupiter.api.AfterEach
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Assertions.assertTrue
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.DisplayName
+import org.junit.jupiter.api.Test
+import theodolite.k8s.K8sManager
+import theodolite.k8s.resourceLoader.K8sResourceLoaderFromFile
+import theodolite.model.crd.ExecutionStates
+import java.time.Duration
+
+class StateHandlerTest {
+    private val testResourcePath = "./src/test/resources/k8s-resource-files/"
+    private val server = KubernetesServer(false, true)
+
+    @BeforeEach
+    fun setUp() {
+        server.before()
+        val executionResource = K8sResourceLoaderFromFile(server.client)
+            .loadK8sResource("Execution", testResourcePath + "test-execution.yaml")
+
+        K8sManager(server.client).deploy(executionResource)
+    }
+
+    @AfterEach
+    fun tearDown() {
+        server.after()
+    }
+
+    @Test
+    @DisplayName("check if Statehandler is namespaced")
+    fun namespacedTest() {
+        val handler = ExecutionStateHandler(client = server.client)
+        handler.getExecutionState("example-execution")
+        assert(
+            server
+                .lastRequest
+                .toString()
+                .contains("namespaces")
+        )
+    }
+
+    @Test
+    @DisplayName("Test empty execution state")
+    fun executionWithoutExecutionStatusTest() {
+        val handler = ExecutionStateHandler(client = server.client)
+        assertEquals(ExecutionStates.NO_STATE, handler.getExecutionState("example-execution"))
+    }
+
+    @Test
+    @DisplayName("Test empty duration state")
+    fun executionWithoutDurationStatusTest() {
+        val handler = ExecutionStateHandler(client = server.client)
+        assertEquals("-", handler.getDurationState("example-execution"))
+    }
+
+    @Test
+    @DisplayName("Test set and get of the execution state")
+    fun executionStatusTest() {
+        val handler = ExecutionStateHandler(client = server.client)
+
+        assertTrue(handler.setExecutionState("example-execution", ExecutionStates.INTERRUPTED))
+        assertEquals(ExecutionStates.INTERRUPTED, handler.getExecutionState("example-execution"))
+    }
+
+    @Test
+    @DisplayName("Test set and get of the duration state")
+    fun durationStatusTest() {
+        val handler = ExecutionStateHandler(client = server.client)
+
+        assertTrue(handler.setDurationState("example-execution", Duration.ofMillis(100)))
+        assertEquals("0s", handler.getDurationState("example-execution"))
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/test/kotlin/theodolite/k8s/K8sManagerTest.kt b/theodolite/src/test/kotlin/theodolite/k8s/K8sManagerTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..7c69618de03f730f5b6f1cb83c5df544e2cd120c
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/k8s/K8sManagerTest.kt
@@ -0,0 +1,156 @@
+package theodolite.k8s
+
+import io.fabric8.kubernetes.api.model.*
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+import io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer
+import io.quarkus.test.junit.QuarkusTest
+import mu.KotlinLogging
+import org.json.JSONObject
+import org.junit.jupiter.api.AfterEach
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.DisplayName
+import org.junit.jupiter.api.Test
+import theodolite.k8s.resourceLoader.K8sResourceLoaderFromFile
+
+
+private val logger = KotlinLogging.logger {}
+
+@QuarkusTest
+class K8sManagerTest {
+    private final val server = KubernetesServer(false, true)
+    private final val testResourcePath = "./src/test/resources/k8s-resource-files/"
+
+    private final val resourceName = "test-resource"
+    private final val metadata: ObjectMeta = ObjectMetaBuilder().withName(resourceName).build()
+
+
+    val defaultDeployment: Deployment = DeploymentBuilder()
+        .withMetadata(metadata)
+        .withNewSpec()
+        .editOrNewSelector()
+        .withMatchLabels<String, String>(mapOf("app" to "test"))
+        .endSelector()
+        .endSpec()
+        .build()
+
+    val defaultStatefulSet: StatefulSet = StatefulSetBuilder()
+        .withMetadata(metadata)
+        .withNewSpec()
+        .editOrNewSelector()
+        .withMatchLabels<String, String>(mapOf("app" to "test"))
+        .endSelector()
+        .endSpec()
+        .build()
+
+    val defaultService: Service = ServiceBuilder()
+        .withMetadata(metadata)
+        .build()
+
+    val defaultConfigMap: ConfigMap = ConfigMapBuilder()
+        .withMetadata(metadata)
+        .build()
+
+    @BeforeEach
+    fun setUp() {
+        server.before()
+    }
+
+    @AfterEach
+    fun tearDown() {
+        server.after()
+    }
+
+    @Test
+    @DisplayName("Test handling of Deployments")
+    fun handleDeploymentTest() {
+        val manager = K8sManager(server.client)
+
+        manager.deploy(defaultDeployment)
+        assertEquals(1, server.client.apps().deployments().list().items.size)
+        assertEquals(resourceName, server.client.apps().deployments().list().items.first().metadata.name)
+
+        manager.remove(defaultDeployment)
+        assertEquals(0, server.client.apps().deployments().list().items.size)
+    }
+
+    @Test
+    @DisplayName("Test handling of StatefulSets")
+    fun handleStatefulSetTest() {
+        val manager = K8sManager(server.client)
+
+        manager.deploy(defaultStatefulSet)
+        assertEquals(1, server.client.apps().statefulSets().list().items.size)
+        assertEquals(resourceName, server.client.apps().statefulSets().list().items.first().metadata.name)
+
+        manager.remove(defaultStatefulSet)
+        assertEquals(0, server.client.apps().statefulSets().list().items.size)
+    }
+
+    @Test
+    @DisplayName("Test handling of Services")
+    fun handleServiceTest() {
+        val manager = K8sManager(server.client)
+
+        manager.deploy(defaultService)
+        assertEquals(1, server.client.services().list().items.size)
+        assertEquals(resourceName, server.client.services().list().items.first().metadata.name)
+
+        manager.remove(defaultService)
+        assertEquals(0, server.client.services().list().items.size)
+    }
+
+    @Test
+    @DisplayName("Test handling of ConfigMaps")
+    fun handleConfigMapTest() {
+        val manager = K8sManager(server.client)
+
+        manager.deploy(defaultConfigMap)
+        assertEquals(1, server.client.configMaps().list().items.size)
+        assertEquals(resourceName, server.client.configMaps().list().items.first().metadata.name)
+
+        manager.remove(defaultConfigMap)
+        assertEquals(0, server.client.configMaps().list().items.size)
+    }
+
+    @Test
+    @DisplayName("Test handling of custom resources")
+    fun handleCustomResourcesTest() {
+        val manager = K8sManager(server.client)
+        val serviceMonitor = K8sResourceLoaderFromFile(server.client)
+            .loadK8sResource("ServiceMonitor", testResourcePath + "test-service-monitor.yaml")
+
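+        // The context describes where the custom resource is served in the
+        // Kubernetes API: version, scope, group, and plural name.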
+        val serviceMonitorContext = K8sContextFactory().create(
+            api = "v1",
+            scope = "Namespaced",
+            group = "monitoring.coreos.com",
+            plural = "servicemonitors"
+        )
+        manager.deploy(serviceMonitor)
+
+        var serviceMonitors = JSONObject(server.client.customResource(serviceMonitorContext).list())
+            .getJSONArray("items")
+
+        assertEquals(1, serviceMonitors.length())
+        assertEquals(
+            "test-service-monitor",
+            serviceMonitors.getJSONObject(0).getJSONObject("metadata").getString("name")
+        )
+
+        manager.remove(serviceMonitor)
+
+        serviceMonitors = JSONObject(server.client.customResource(serviceMonitorContext).list())
+            .getJSONArray("items")
+
+        assertEquals(0, serviceMonitors.length())
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/test/kotlin/theodolite/k8s/K8sResourceLoaderTest.kt b/theodolite/src/test/kotlin/theodolite/k8s/K8sResourceLoaderTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..4a41dac8b27b9d4ddcfb9915f759b14ea4eaba4a
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/k8s/K8sResourceLoaderTest.kt
@@ -0,0 +1,111 @@
+package theodolite.k8s
+
+import io.fabric8.kubernetes.api.model.ConfigMap
+import io.fabric8.kubernetes.api.model.Service
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer
+import io.quarkus.test.junit.QuarkusTest
+import org.junit.jupiter.api.AfterEach
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Assertions.assertTrue
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.DisplayName
+import org.junit.jupiter.api.Test
+import theodolite.k8s.resourceLoader.K8sResourceLoaderFromFile
+
+@QuarkusTest
+class K8sResourceLoaderTest {
+    private val server = KubernetesServer(false, true)
+    private val testResourcePath = "./src/test/resources/k8s-resource-files/"
+
+    @BeforeEach
+    fun setUp() {
+        server.before()
+    }
+
+    @AfterEach
+    fun tearDown() {
+        server.after()
+    }
+
+    @Test
+    @DisplayName("Test loading of Deployments")
+    fun loadDeploymentTest() {
+        val loader = K8sResourceLoaderFromFile(server.client)
+        val resource = loader.loadK8sResource("Deployment", testResourcePath + "test-deployment.yaml")
+
+        assertTrue(resource is Deployment)
+        assertTrue(resource.toString().contains("name=test-deployment"))
+    }
+
+    @Test
+    @DisplayName("Test loading of StatefulSet")
+    fun loadStatefulSetTest() {
+        val loader = K8sResourceLoaderFromFile(server.client)
+        val resource = loader.loadK8sResource("StatefulSet", testResourcePath + "test-statefulset.yaml")
+
+        assertTrue(resource is StatefulSet)
+        assertTrue(resource.toString().contains("name=test-statefulset"))
+    }
+
+    @Test
+    @DisplayName("Test loading of Service")
+    fun loadServiceTest() {
+        val loader = K8sResourceLoaderFromFile(server.client)
+        val resource = loader.loadK8sResource("Service", testResourcePath + "test-service.yaml")
+
+        assertTrue(resource is Service)
+        assertTrue(resource.toString().contains("name=test-service"))
+    }
+
+    @Test
+    @DisplayName("Test loading of ConfigMap")
+    fun loadConfigMapTest() {
+        val loader = K8sResourceLoaderFromFile(server.client)
+        val resource = loader.loadK8sResource("ConfigMap", testResourcePath + "test-configmap.yaml")
+
+        assertTrue(resource is ConfigMap)
+        assertTrue(resource.toString().contains("name=test-configmap"))
+    }
+
+    @Test
+    @DisplayName("Test loading of ServiceMonitors")
+    fun loadServiceMonitorTest() {
+        val loader = K8sResourceLoaderFromFile(server.client)
+        val resource = loader.loadK8sResource("ServiceMonitor", testResourcePath + "test-service-monitor.yaml")
+
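+        // Kinds without a typed model class are returned wrapped in a CustomResourceWrapper.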
+        assertTrue(resource is CustomResourceWrapper)
+        if (resource is CustomResourceWrapper) {
+            assertEquals("test-service-monitor", resource.getName())
+        }
+    }
+
+    @Test
+    @DisplayName("Test loading of Executions")
+    fun loadExecutionTest() {
+        val loader = K8sResourceLoaderFromFile(server.client)
+        val resource = loader.loadK8sResource("Execution", testResourcePath + "test-execution.yaml")
+
+        assertTrue(resource is CustomResourceWrapper)
+        if (resource is CustomResourceWrapper) {
+            assertEquals("example-execution", resource.getName())
+        }
+    }
+
+    @Test
+    @DisplayName("Test loading of Benchmarks")
+    fun loadBenchmarkTest() {
+        val loader = K8sResourceLoaderFromFile(server.client)
+        val resource = loader.loadK8sResource("Benchmark", testResourcePath + "test-benchmark.yaml")
+
+        assertTrue(resource is CustomResourceWrapper)
+        if (resource is CustomResourceWrapper) {
+            assertEquals("example-benchmark", resource.getName())
+        }
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/test/kotlin/theodolite/patcher/ConfigOverrideModifierTest.kt b/theodolite/src/test/kotlin/theodolite/patcher/ConfigOverrideModifierTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..1db1122e1caa5a783159ecaba849b99963e3c2a9
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/patcher/ConfigOverrideModifierTest.kt
@@ -0,0 +1,56 @@
+package theodolite.patcher
+
+import io.quarkus.test.junit.QuarkusTest
+import org.junit.jupiter.api.Assertions
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.Test
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.benchmark.KubernetesBenchmark
+import theodolite.execution.operator.BenchmarkCRDummy
+import theodolite.execution.operator.ExecutionCRDummy
+
+@QuarkusTest
+class ConfigOverrideModifierTest {
+    private var execution = BenchmarkExecution()
+    private var benchmark = KubernetesBenchmark()
+
+    @BeforeEach
+    fun setup() {
+        val execution1 = ExecutionCRDummy(name = "matching-execution", benchmark = "Test-Benchmark")
+        val benchmark1 = BenchmarkCRDummy(name = "Test-Benchmark")
+
+        this.execution = execution1.getCR().spec
+        this.benchmark = benchmark1.getCR().spec
+    }
+
+    @Test
+    fun setAdditionalLabelsTest() {
+        val modifier = ConfigOverrideModifier(
+            execution = this.execution,
+            resources = listOf("test-resource.yaml")
+        )
+
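+        // setAdditionalLabels is expected to append a config override whose patcher writes
+        // the given label; the assertions below inspect the generated patcher properties and value.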
+        modifier.setAdditionalLabels(
+            labelName = "test-name",
+            labelValue = "test-value"
+        )
+
+        Assertions.assertEquals(
+            "test-name",
+            this.execution
+                .configOverrides.firstOrNull()
+                ?.patcher
+                ?.properties
+                ?.get("variableName")
+        )
+        Assertions.assertEquals(
+            "test-value",
+            this.execution
+                .configOverrides.firstOrNull()
+                ?.value
+        )
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/test/kotlin/theodolite/strategies/restriction/LowerBoundRestrictionTest.kt b/theodolite/src/test/kotlin/theodolite/strategies/restriction/LowerBoundRestrictionTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..b368647e314a4d803b444268c8218aefbee00ad4
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/strategies/restriction/LowerBoundRestrictionTest.kt
@@ -0,0 +1,118 @@
+package theodolite.strategies.restriction
+
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Assertions.assertNotNull
+import org.junit.jupiter.api.Disabled
+import org.junit.jupiter.api.Test
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import theodolite.util.Results
+
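+/**
+ * The lower bound restriction prunes the resource search space: for a given load, all resource
+ * amounts below the minimum that was already required for a lower load are skipped.
+ */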
+internal class LowerBoundRestrictionTest {
+
+    @Test
+    fun testNoPreviousResults() {
+        val results = Results()
+        val strategy = LowerBoundRestriction(results)
+        val load = buildLoadDimension(10000)
+        val resources = listOf(
+            buildResourcesDimension(1),
+            buildResourcesDimension(2),
+            buildResourcesDimension(3)
+        )
+        val restriction = strategy.apply(load, resources)
+
+        assertEquals(3, restriction.size)
+        assertEquals(resources, restriction)
+    }
+
+    @Test
+    fun testWithSuccessfulPreviousResults() {
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(20000, 1, false)
+        results.setResult(20000, 2, true)
+        val strategy = LowerBoundRestriction(results)
+        val load = buildLoadDimension(30000)
+        val resources = listOf(
+            buildResourcesDimension(1),
+            buildResourcesDimension(2),
+            buildResourcesDimension(3)
+        )
+        val restriction = strategy.apply(load, resources)
+
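+        // Load 20000 already required 2 instances, so resource amount 1 is pruned for the higher load.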
+        assertEquals(2, restriction.size)
+        assertEquals(resources.subList(1, 3), restriction)
+    }
+
+    @Test
+    @Disabled
+    fun testWithNoSuccessfulPreviousResults() {
+        // The tested behavior is not currently implemented; it may become the desired behavior later.
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(20000, 1, false)
+        results.setResult(20000, 2, false)
+        results.setResult(20000, 3, false)
+        val strategy = LowerBoundRestriction(results)
+        val load = buildLoadDimension(30000)
+        val resources = listOf(
+            buildResourcesDimension(1),
+            buildResourcesDimension(2),
+            buildResourcesDimension(3)
+        )
+        val restriction = strategy.apply(load, resources)
+
+        assertEquals(0, restriction.size)
+        assertEquals(emptyList<Resource>(), restriction)
+    }
+
+    @Test
+    fun testMinRequiredInstancesWithOverwrittenResults() {
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(20000, 2, true)
+        results.setResult(10000, 1, false)
+        results.setResult(20000, 2, true)
+
+        val minRequiredInstances = results.getMinRequiredInstances(LoadDimension(20000, emptyList()))
+
+        assertNotNull(minRequiredInstances)
+        assertEquals(2, minRequiredInstances!!.get())
+    }
+
+    @Test
+    @Disabled
+    fun testMinRequiredInstancesWhenNotSuccessful() {
+        // The tested behavior is not currently implemented; it may become the desired behavior later.
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(20000, 2, true)
+        results.setResult(10000, 1, false)
+        results.setResult(20000, 2, false)
+
+        val minRequiredInstances = results.getMinRequiredInstances(LoadDimension(20000, emptyList()))
+
+        assertNotNull(minRequiredInstances)
+        assertEquals(2, minRequiredInstances!!.get())
+    }
+
+    private fun buildLoadDimension(load: Int): LoadDimension {
+        return LoadDimension(load, emptyList())
+    }
+
+    private fun buildResourcesDimension(resources: Int): Resource {
+        return Resource(resources, emptyList())
+    }
+
+    private fun Results.setResult(load: Int, resources: Int, successful: Boolean) {
+        this.setResult(
+            Pair(
+                buildLoadDimension(load),
+                buildResourcesDimension(resources)
+            ),
+            successful
+        )
+    }
+}
diff --git a/theodolite/src/test/kotlin/theodolite/util/ExecutionStateComparatorTest.kt b/theodolite/src/test/kotlin/theodolite/util/ExecutionStateComparatorTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..7332e53f9e1814f28b8ff37a595b31b0eb931ea7
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/util/ExecutionStateComparatorTest.kt
@@ -0,0 +1,29 @@
+package theodolite.util
+
+import io.quarkus.test.junit.QuarkusTest
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Test
+import theodolite.execution.operator.ExecutionCRDummy
+import theodolite.model.crd.ExecutionStates
+
+@QuarkusTest
+class ExecutionStateComparatorTest {
+
+    @Test
+    fun testCompare() {
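+        // The comparator is expected to order executions in the preferred state (RESTART)
+        // before all others, which is why the sorted list equals the reversed input below.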
+        val comparator = ExecutionStateComparator(ExecutionStates.RESTART)
+        val execution1 = ExecutionCRDummy("dummy1", "default-benchmark")
+        val execution2 = ExecutionCRDummy("dummy2", "default-benchmark")
+        execution1.getStatus().executionState = ExecutionStates.RESTART.value
+        execution2.getStatus().executionState = ExecutionStates.PENDING.value
+        val list = listOf(execution2.getCR(), execution1.getCR())
+
+        assertEquals(
+            list.reversed(),
+            list.sortedWith(comparator)
+        )
+    }
+}
\ No newline at end of file
diff --git a/theodolite/src/test/kotlin/theodolite/util/IOHandlerTest.kt b/theodolite/src/test/kotlin/theodolite/util/IOHandlerTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..6b8aa1d567fd2c93c1301fe3f953273e0f5d5420
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/util/IOHandlerTest.kt
@@ -0,0 +1,139 @@
+package theodolite.util
+
+import com.google.gson.GsonBuilder
+import io.quarkus.test.junit.QuarkusTest
+import org.hamcrest.CoreMatchers.containsString
+import org.hamcrest.MatcherAssert.assertThat
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Assertions.assertTrue
+import org.junit.jupiter.api.Test
+import org.junit.rules.TemporaryFolder
+import org.junitpioneer.jupiter.ClearEnvironmentVariable
+import org.junitpioneer.jupiter.SetEnvironmentVariable
+
+
+const val FOLDER_URL = "Test-Folder"
+
+@QuarkusTest
+internal class IOHandlerTest {
+
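+    // JUnit 4's @Rule mechanism is not applied under JUnit 5, so the temporary
+    // folder is created manually in each test.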
+    private val temporaryFolder = TemporaryFolder()
+
+    @Test
+    fun testWriteStringToText() {
+        temporaryFolder.create()
+        val testContent = "Test-File-Content"
+        val folder = temporaryFolder.newFolder(FOLDER_URL)
+
+        IOHandler().writeStringToTextFile(
+            fileURL = "${folder.absolutePath}/test-file.txt",
+            data = testContent
+        )
+
+        assertEquals(
+            testContent,
+            IOHandler().readFileAsString("${folder.absolutePath}/test-file.txt")
+        )
+    }
+
+    @Test
+    fun testWriteToCSVFile() {
+        temporaryFolder.create()
+        val folder = temporaryFolder.newFolder(FOLDER_URL)
+
+        val testContent = listOf(
+            listOf("apples", "red"),
+            listOf("bananas", "yellow"),
+            listOf("avocado", "brown")
+        )
+        val columns = listOf("Fruit", "Color")
+
+        IOHandler().writeToCSVFile(
+            fileURL = "${folder.absolutePath}/test-file",
+            data = testContent,
+            columns = columns
+        )
+
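+        // The CSV writer is expected to emit a header row followed by one
+        // comma-separated line per record.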
+        var expected = "Fruit,Color\n"
+        testContent.forEach { expected += it[0] + "," + it[1] + "\n" }
+
+        assertEquals(
+            expected.trim(),
+            IOHandler().readFileAsString("${folder.absolutePath}/test-file.csv")
+        )
+    }
+
+    @Test
+    fun testWriteToJSONFile() {
+        temporaryFolder.create()
+        val folder = temporaryFolder.newFolder(FOLDER_URL)
+        val testContent = Resource(0, emptyList())
+
+        IOHandler().writeToJSONFile(
+            fileURL = "${folder.absolutePath}/test-file.json",
+            objectToSave = testContent
+        )
+
+        val expected = GsonBuilder().enableComplexMapKeySerialization().setPrettyPrinting().create().toJson(testContent)
+
+        assertEquals(
+            expected,
+            IOHandler().readFileAsString("${folder.absolutePath}/test-file.json")
+        )
+    }
+
+    // Tests for the function `getResultFolderURL`
+
+    @Test
+    @ClearEnvironmentVariable.ClearEnvironmentVariables(
+        ClearEnvironmentVariable(key = "RESULTS_FOLDER"),
+        ClearEnvironmentVariable(key = "CREATE_RESULTS_FOLDER")
+    )
+    fun testGetResultFolderURL_emptyEnvironmentVars() {
+        assertEquals("", IOHandler().getResultFolderURL())
+    }
+
+    @Test
+    @SetEnvironmentVariable.SetEnvironmentVariables(
+        SetEnvironmentVariable(key = "RESULTS_FOLDER", value = "./src/test/resources"),
+        SetEnvironmentVariable(key = "CREATE_RESULTS_FOLDER", value = "false")
+    )
+    fun testGetResultFolderURL_FolderExist() {
+        assertEquals("./src/test/resources/", IOHandler().getResultFolderURL())
+    }
+
+    @Test
+    @SetEnvironmentVariable.SetEnvironmentVariables(
+        SetEnvironmentVariable(key = "RESULTS_FOLDER", value = "$FOLDER_URL-0"),
+        SetEnvironmentVariable(key = "CREATE_RESULTS_FOLDER", value = "false")
+    )
+    fun testGetResultFolderURL_FolderNotExist() {
+        var exceptionWasThrown = false
+        try {
+            IOHandler().getResultFolderURL()
+        } catch (e: Exception) {
+            exceptionWasThrown = true
+            assertThat(e.toString(), containsString("Result folder not found"))
+        }
+        assertTrue(exceptionWasThrown)
+    }
+
+    @Test
+    @SetEnvironmentVariable.SetEnvironmentVariables(
+        SetEnvironmentVariable(key = "RESULTS_FOLDER", value = FOLDER_URL),
+        SetEnvironmentVariable(key = "CREATE_RESULTS_FOLDER", value = "true")
+    )
+    fun testGetResultFolderURL_CreateFolderIfNotExist() {
+        assertEquals("$FOLDER_URL/", IOHandler().getResultFolderURL())
+    }
+
+    @Test
+    @ClearEnvironmentVariable(key = "RESULTS_FOLDER")
+    @SetEnvironmentVariable(key = "CREATE_RESULTS_FOLDER", value = "true")
+    fun testGetResultFolderURL_CreateFolderButNoFolderGiven() {
+        assertEquals("", IOHandler().getResultFolderURL())
+    }
+}
diff --git a/theodolite/src/test/kotlin/theodolite/util/ResultsTest.kt b/theodolite/src/test/kotlin/theodolite/util/ResultsTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..9cfc2ae78e7a8846e3f0fa136699509145e5de22
--- /dev/null
+++ b/theodolite/src/test/kotlin/theodolite/util/ResultsTest.kt
@@ -0,0 +1,75 @@
+package theodolite.util
+
+import io.quarkus.test.junit.QuarkusTest
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Assertions.assertNotNull
+import org.junit.jupiter.api.Disabled
+import org.junit.jupiter.api.Test
+
+@QuarkusTest
+internal class ResultsTest {
+
+    @Test
+    fun testMinRequiredInstancesWhenSuccessful() {
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(10000, 2, true)
+        results.setResult(20000, 1, false)
+        results.setResult(20000, 2, true)
+
+        val minRequiredInstances = results.getMinRequiredInstances(LoadDimension(20000, emptyList()))
+
+        assertNotNull(minRequiredInstances)
+        assertEquals(2, minRequiredInstances!!.get())
+    }
+
+    @Test
+    @Disabled
+    fun testMinRequiredInstancesWhenNotSuccessful() {
+        // The tested behavior is not currently implemented; it may become the desired behavior later.
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(10000, 2, true)
+        results.setResult(20000, 1, false)
+        results.setResult(20000, 2, false)
+
+        val minRequiredInstances = results.getMinRequiredInstances(LoadDimension(20000, emptyList()))
+
+        assertNotNull(minRequiredInstances)
+        assertEquals(2, minRequiredInstances!!.get())
+    }
+
+    private fun Results.setResult(load: Int, resources: Int, successful: Boolean) {
+        this.setResult(
+            Pair(
+                LoadDimension(load, emptyList()),
+                Resource(resources, emptyList())
+            ),
+            successful
+        )
+    }
+
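+    // As the two tests below suggest, getMaxBenchmarkedLoad returns the largest load
+    // benchmarked so far, successful or not, that does not exceed the given load.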
+    @Test
+    fun testGetMaxBenchmarkedLoadWhenAllSuccessful() {
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(10000, 2, true)
+
+        val test1 = results.getMaxBenchmarkedLoad(LoadDimension(100000, emptyList()))!!.get()
+
+        assertEquals(10000, test1)
+    }
+
+    @Test
+    fun testGetMaxBenchmarkedLoadWhenLargestNotSuccessful() {
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(10000, 2, true)
+        results.setResult(20000, 1, false)
+
+        val test2 = results.getMaxBenchmarkedLoad(LoadDimension(100000, emptyList()))!!.get()
+
+        assertEquals(20000, test2)
+    }
+}
diff --git a/theodolite/src/test/resources/cpu-deployment.yaml b/theodolite/src/test/resources/cpu-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9845648949babd260192e6c6fa652db976c04288
--- /dev/null
+++ b/theodolite/src/test/resources/cpu-deployment.yaml
@@ -0,0 +1,56 @@
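+# Test fixture: aggregation Deployment with CPU-only resource limits and requests
+# (cf. the memory-, cpu-memory-, and no-resources variants).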
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "my-confluent-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://my-confluent-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              cpu: 1000m
+            requests:
+              cpu: 500m
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/theodolite/src/test/resources/cpu-memory-deployment.yaml b/theodolite/src/test/resources/cpu-memory-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..eaae989abb1f3b4fa44f032eee700181fb75e48e
--- /dev/null
+++ b/theodolite/src/test/resources/cpu-memory-deployment.yaml
@@ -0,0 +1,58 @@
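+# Test fixture: aggregation Deployment with both CPU and memory limits and requests.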
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "my-confluent-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://my-confluent-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+            requests:
+              memory: 2Gi
+              cpu: 500m
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/theodolite/src/test/resources/k8s-resource-files/test-benchmark.yaml b/theodolite/src/test/resources/k8s-resource-files/test-benchmark.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e690aa56d74d695b0b81469023ccf82d0046cf45
--- /dev/null
+++ b/theodolite/src/test/resources/k8s-resource-files/test-benchmark.yaml
@@ -0,0 +1,38 @@
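+# Minimal benchmark CR; loaded by the resource-loader tests as "example-benchmark".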
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: example-benchmark
+spec:
+  appResource:
+    - "uc1-kstreams-deployment.yaml"
+    - "aggregation-service.yaml"
+    - "jmx-configmap.yaml"
+    - "uc1-service-monitor.yaml"
+  loadGenResource:
+    - "uc1-load-generator-deployment.yaml"
+    - "uc1-load-generator-service.yaml"
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "uc1-kstreams-deployment.yaml"
+  loadTypes:
+    - typeName: "NumSensors"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc1-load-generator-deployment.yaml"
+          properties:
+            variableName: "NUM_SENSORS"
+            container: "workload-generator"
+        - type: "NumSensorsLoadGeneratorReplicaPatcher"
+          resource: "uc1-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "15000"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "theodolite-.*"
+        removeOnly: true
\ No newline at end of file
diff --git a/theodolite/src/test/resources/k8s-resource-files/test-configmap.yaml b/theodolite/src/test/resources/k8s-resource-files/test-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dce11c991749e538d856e664539e136e19a8ce6b
--- /dev/null
+++ b/theodolite/src/test/resources/k8s-resource-files/test-configmap.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: test-configmap
+data:
+  test: test
\ No newline at end of file
diff --git a/theodolite/src/test/resources/k8s-resource-files/test-deployment.yaml b/theodolite/src/test/resources/k8s-resource-files/test-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e9c4bda12ce781dc85307ec393f821a5df04599e
--- /dev/null
+++ b/theodolite/src/test/resources/k8s-resource-files/test-deployment.yaml
@@ -0,0 +1,17 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: test-deployment
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+
diff --git a/theodolite/src/test/resources/k8s-resource-files/test-execution-1.yaml b/theodolite/src/test/resources/k8s-resource-files/test-execution-1.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1407a9952b7454053d204454841d51cfb4d7dbf4
--- /dev/null
+++ b/theodolite/src/test/resources/k8s-resource-files/test-execution-1.yaml
@@ -0,0 +1,28 @@
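+# Execution CR variant with the SLO fields (threshold, externalSloUrl, warmup)
+# at the top level of the SLO entry instead of under "properties".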
+apiVersion: theodolite.com/v1
+kind: Execution
+metadata:
+  name: example-execution
+spec:
+  name: test
+  benchmark: "uc1-kstreams"
+  load:
+    loadType: "NumSensors"
+    loadValues: [25000, 50000, 75000, 100000, 125000, 150000]
+  resources:
+    resourceType: "Instances"
+    resourceValues: [1, 2, 3, 4, 5]
+  slos:
+    - sloType: "lag trend"
+      threshold: 2000
+      prometheusUrl: "http://prometheus-operated:9090"
+      externalSloUrl: "http://localhost:80/evaluate-slope"
+      offset: 0
+      warmup: 60 # in seconds
+  execution:
+    strategy: "LinearSearch"
+    duration: 300 # in seconds
+    repetitions: 1
+    loadGenerationDelay: 30 # in seconds
+    restrictions:
+      - "LowerBound"
+  configOverrides: []
diff --git a/theodolite/src/test/resources/k8s-resource-files/test-execution-update.yaml b/theodolite/src/test/resources/k8s-resource-files/test-execution-update.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c075702da218397352f1dc1e5b283534fbb4d718
--- /dev/null
+++ b/theodolite/src/test/resources/k8s-resource-files/test-execution-update.yaml
@@ -0,0 +1,29 @@
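+# Like test-execution.yaml, but referencing benchmark "uc1-kstreams-update";
+# presumably used to test handling of execution updates.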
+apiVersion: theodolite.com/v1
+kind: execution
+metadata:
+  name: example-execution
+spec:
+  name: test
+  benchmark: "uc1-kstreams-update"
+  load:
+    loadType: "NumSensors"
+    loadValues: [25000, 50000, 75000, 100000, 125000, 150000]
+  resources:
+    resourceType: "Instances"
+    resourceValues: [1, 2, 3, 4, 5]
+  slos:
+    - sloType: "lag trend"
+      prometheusUrl: "http://prometheus-operated:9090"
+      offset: 0
+      properties:
+        threshold: 2000
+        externalSloUrl: "http://localhost:80/evaluate-slope"
+        warmup: 60 # in seconds
+  execution:
+    strategy: "LinearSearch"
+    duration: 300 # in seconds
+    repetitions: 1
+    loadGenerationDelay: 30 # in seconds
+    restrictions:
+      - "LowerBound"
+  configOverrides: []
diff --git a/theodolite/src/test/resources/k8s-resource-files/test-execution.yaml b/theodolite/src/test/resources/k8s-resource-files/test-execution.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e12c851da5d8a79f57b1fa59b86239c219370c0f
--- /dev/null
+++ b/theodolite/src/test/resources/k8s-resource-files/test-execution.yaml
@@ -0,0 +1,29 @@
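+# Baseline execution CR; loaded, among others, by the resource-loader tests as "example-execution".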
+apiVersion: theodolite.com/v1
+kind: execution
+metadata:
+  name: example-execution
+spec:
+  name: test
+  benchmark: "uc1-kstreams"
+  load:
+    loadType: "NumSensors"
+    loadValues: [25000, 50000, 75000, 100000, 125000, 150000]
+  resources:
+    resourceType: "Instances"
+    resourceValues: [1, 2, 3, 4, 5]
+  slos:
+    - sloType: "lag trend"
+      prometheusUrl: "http://prometheus-operated:9090"
+      offset: 0
+      properties:
+        threshold: 2000
+        externalSloUrl: "http://localhost:80/evaluate-slope"
+        warmup: 60 # in seconds
+  execution:
+    strategy: "LinearSearch"
+    duration: 300 # in seconds
+    repetitions: 1
+    loadGenerationDelay: 30 # in seconds
+    restrictions:
+      - "LowerBound"
+  configOverrides: []
diff --git a/theodolite/src/test/resources/k8s-resource-files/test-service-monitor.yaml b/theodolite/src/test/resources/k8s-resource-files/test-service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e8a0e52e15245e790adf2cbf84edb517754267be
--- /dev/null
+++ b/theodolite/src/test/resources/k8s-resource-files/test-service-monitor.yaml
@@ -0,0 +1,7 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: titan-ccp-aggregation
+    appScope: titan-ccp
+  name: test-service-monitor
\ No newline at end of file
diff --git a/theodolite/src/test/resources/k8s-resource-files/test-service.yaml b/theodolite/src/test/resources/k8s-resource-files/test-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..471b6db83525b1afbe8cdac38c42399ecc33ef57
--- /dev/null
+++ b/theodolite/src/test/resources/k8s-resource-files/test-service.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: test-service
+  labels:
+    app: titan-ccp-aggregation
+spec:
+  selector:
+    app: titan-ccp-aggregation
\ No newline at end of file
diff --git a/theodolite/src/test/resources/k8s-resource-files/test-statefulset.yaml b/theodolite/src/test/resources/k8s-resource-files/test-statefulset.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6a8810e9ee156ae1b055c1bef6ed4b29d1c41668
--- /dev/null
+++ b/theodolite/src/test/resources/k8s-resource-files/test-statefulset.yaml
@@ -0,0 +1,16 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: test-statefulset
+spec:
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+        - name: nginx
+          image: k8s.gcr.io/nginx-slim:0.8
diff --git a/theodolite/src/test/resources/memory-deployment.yaml b/theodolite/src/test/resources/memory-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7af278b8c6b2efd13adbcc77e2db5a7b4c4478ad
--- /dev/null
+++ b/theodolite/src/test/resources/memory-deployment.yaml
@@ -0,0 +1,56 @@
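+# Test fixture: aggregation Deployment with memory-only resource limits and requests.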
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "my-confluent-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://my-confluent-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              memory: 4Gi
+            requests:
+              memory: 2Gi
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/theodolite/src/test/resources/no-resources-deployment.yaml b/theodolite/src/test/resources/no-resources-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0687a3e042575951ec903492589101c122406f7f
--- /dev/null
+++ b/theodolite/src/test/resources/no-resources-deployment.yaml
@@ -0,0 +1,51 @@
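+# Test fixture: aggregation Deployment without any resource limits or requests.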
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "my-confluent-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://my-confluent-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
\ No newline at end of file