diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 750f88b315904d57b9f5def70d5f35938e4e7555..0dda0bdb6be4434c91801cb6665364fb7fd63d6a 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,291 +1,32 @@
-image: openjdk:11-jdk
-
-# Disable the Gradle daemon for Continuous Integration servers as correctness
-# is usually a priority over speed in CI environments. Using a fresh
-# runtime for each build is more reliable since the runtime is completely
-# isolated from any previous builds.
-variables:
-  GRADLE_OPTS: "-Dorg.gradle.daemon=false"
-
-cache:
-  paths:
-    - .gradle
-
-before_script:
-  - export GRADLE_USER_HOME=`pwd`/.gradle
+workflow:
+  rules:
+    - if: $CI_MERGE_REQUEST_ID
+      when: never
+    - when: always
 
 stages:
-  - build
-  - test
-  - check
-  - deploy
-
-build:
-  stage: build
-  tags:
-    - exec-docker
-  script: ./gradlew --build-cache assemble
-  artifacts:
-    paths:
-      - "build/libs/*.jar"
-      - "*/build/distributions/*.tar"
-    expire_in: 1 day
-
-test:
-  stage: test
-  tags:
-    - exec-docker
-  script: ./gradlew test --continue
-  artifacts:
-    reports:
-      junit:
-        - "**/build/test-results/test/TEST-*.xml"
-
-checkstyle:
-  stage: check
-  tags:
-    - exec-docker
-  script: ./gradlew checkstyle --continue
-  artifacts:
-    paths:
-      - "*/build/reports/checkstyle/main.html"
-    when: on_failure
-    expire_in: 1 day
-
-pmd:
-  stage: check
-  tags:
-    - exec-docker
-  script: ./gradlew pmd --continue
-  artifacts:
-    paths:
-      - "*/build/reports/pmd/*.html"
-    when: on_failure
-    expire_in: 1 day
-
-spotbugs:
-  stage: check
-  tags:
-    - exec-docker
-  script: ./gradlew spotbugs --continue
-  artifacts:
-    paths:
-      - "*/build/reports/spotbugs/*.html"
-    when: on_failure
-    expire_in: 1 day
-
+  - triggers
 
-.deploy:
-  stage: deploy
-  tags:
-    - exec-dind
-  # see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled
-  # for image usage and settings for building with TLS and docker in docker
-  image: docker:19.03.1
-  services:
-    - docker:19.03.1-dind
-  variables:
-    DOCKER_TLS_CERTDIR: "/certs"
-  script:
-    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
-    - docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME
-    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest"
-    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
-    - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$CI_COMMIT_TAG"
-    - echo $DOCKERHUB_PW | docker login -u $DOCKERHUB_ID --password-stdin
-    - docker push $DOCKERHUB_ORG/$IMAGE_NAME
-    - docker logout
+benchmarks:
+  stage: triggers
+  trigger:
+    include: benchmarks/.gitlab-ci.yml
+    strategy: depend
   rules:
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      # - $JAVA_PROJECT_NAME/**/* # hope this can be simplified soon, see #51
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc1-kstreams-app:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc1-kstreams-app"
-    JAVA_PROJECT_NAME: "uc1-application"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc1-application/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc2-kstreams-app:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc2-kstreams-app"
-    JAVA_PROJECT_NAME: "uc2-application"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc2-application/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc3-kstreams-app:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc3-kstreams-app"
-    JAVA_PROJECT_NAME: "uc3-application"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
+    - if: "$CI_COMMIT_TAG"
     - changes:
-      - uc3-application/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
+      - benchmarks/*
+    - when: manual
       allow_failure: true
 
-deploy-uc4-kstreams-app:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc4-kstreams-app"
-    JAVA_PROJECT_NAME: "uc4-application"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc4-application/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc1-workload-generator:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc1-workload-generator"
-    JAVA_PROJECT_NAME: "uc1-workload-generator"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc1-workload-generator/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc2-workload-generator:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc2-workload-generator"
-    JAVA_PROJECT_NAME: "uc2-workload-generator"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc2-workload-generator/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc3-workload-generator:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc3-workload-generator"
-    JAVA_PROJECT_NAME: "uc3-workload-generator"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc3-workload-generator/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc4-workload-generator:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc4-workload-generator"
-    JAVA_PROJECT_NAME: "uc4-workload-generator"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc4-workload-generator/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-
-# hope this can be merged with .deploy soon, see #51
-.deploy_theodolite:
-  stage: deploy
-  tags:
-    - exec-dind
-  image: docker:19.03.1
-  services:
-    - docker:19.03.1-dind
-  variables:
-    DOCKER_TLS_CERTDIR: "/certs"
-  script:
-    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
-    - docker build --pull -t $IMAGE_NAME ./$PROJECT_PATH
-    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest"
-    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
-    - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$CI_COMMIT_TAG"
-    - echo $DOCKERHUB_PW | docker login -u $DOCKERHUB_ID --password-stdin
-    - docker push $DOCKERHUB_ORG/$IMAGE_NAME
-    - docker logout
+execution:
+  stage: triggers
+  trigger:
+    include: execution/.gitlab-ci.yml
+    strategy: depend
   rules:
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $PROJECT_PATH && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - execution/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $PROJECT_PATH"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $PROJECT_PATH"
-      when: manual
-      allow_failure: true
-
-deploy-theodolite:
-  extends: .deploy_theodolite
-  variables:
-    IMAGE_NAME: "theodolite"
-    PROJECT_PATH: "execution"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $PROJECT_PATH && $CI_COMMIT_TAG"
-      when: always
+    - if: "$CI_COMMIT_TAG"
     - changes:
-      - execution/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $PROJECT_PATH"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $PROJECT_PATH"
-      when: manual
+      - execution/*
+    - when: manual
       allow_failure: true
diff --git a/README.md b/README.md
index 8969a283dbcd252ba0901709ec1de7b6726dda9e..b1011530b67dad11da2e59e3decd400186f3ed5c 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@ Theodolite is a framework for benchmarking the horizontal and vertical scalabili
 
 ## Theodolite Benchmarks
 
-Theodolite contains 4 application benchmarks, which are based on typical use cases for stream processing within microservices. For each benchmark, a corresponding workload generator is provided. Currently, this repository provides benchmark implementations for Apache Kafka Streams. Benchmark implementation for Apache Flink are currently under development and can be found in the *apache-flink* branch of this repository.
+Theodolite contains 4 application benchmarks, which are based on typical use cases for stream processing within microservices. For each benchmark, a corresponding workload generator is provided. Currently, this repository provides benchmark implementations for Apache Kafka Streams. Benchmark implementations for Apache Flink are currently under development and can be found in the *apache-flink* branch of this repository. The benchmark sources can be found in [Theodolite benchmarks](benchmarks).
 
 
 ## Theodolite Execution Framework
diff --git a/analysis/.dockerignore b/analysis/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..9a715f53b8129933fe1b20baa4af20772de3c872
--- /dev/null
+++ b/analysis/.dockerignore
@@ -0,0 +1,2 @@
+.dockerignore
+Dockerfile
\ No newline at end of file
diff --git a/analysis/Dockerfile b/analysis/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..1e396697f34f86e578890cbb68b7a8d40a21ebf8
--- /dev/null
+++ b/analysis/Dockerfile
@@ -0,0 +1,7 @@
+FROM jupyter/base-notebook
+
+COPY . /home/jovyan
+
+WORKDIR /home/jovyan
+RUN rm -r work
+RUN pip install -r requirements.txt
diff --git a/analysis/README.md b/analysis/README.md
index 263b1db16fcabefe5409ebe744afe5997bc90d89..5fc0179bf9d1e103783fc1bdb2b030aacbb4ed98 100644
--- a/analysis/README.md
+++ b/analysis/README.md
@@ -9,14 +9,32 @@ benchmark execution results and plotting. The following notebooks are provided:
 
 ## Usage
 
-For executing benchmarks and analyzing their results, a **Python 3.7**
-installation is required (e.g., in a virtual environment). Our notebooks require some
-Python libraries, which can be installed via:
+Basically, the Theodolite Analysis Jupyter notebooks should be runnable by any Jupyter server. To make it a bit easier,
+we provide introductions for running notebooks with Docker and with Visual Studio Code. These introductions may also be
+a good starting point for using another service.
+
+For analyzing and visualizing benchmark results, either Docker or a Jupyter installation with Python 3.7 or newer is
+required (e.g., in a virtual environment).
+
+### Running with Docker
+
+This option requires Docker to be installed. You can build and run a container using the following commands. Make sure
+to set the `results` volume to the directory with your execution results and `results-inst` to a directory where the
+final scalability graphs should be placed. The output of the *run* command gives you a URL of the form
+`http://127.0.0.1:8888/?token=...`, which you should open in your web browser. From there you can access all notebooks.
+You can stop the Jupyter server with Ctrl + C.
 
 ```sh
-pip install -r requirements.txt 
+docker build . -t theodolite-analysis
+docker run --rm -p 8888:8888 -v "$PWD/../results":/home/jovyan/results -v "$PWD/../results-inst":/home/jovyan/results-inst theodolite-analysis
 ```
 
-We have tested these
-notebooks with [Visual Studio Code](https://code.visualstudio.com/docs/python/jupyter-support),
-however, every other server should be fine as well.
+### Running with Visual Studio Code
+
+The [Visual Studio Code Documentation](https://code.visualstudio.com/docs/python/jupyter-support) shows how to run Jupyter
+notebooks with Visual Studio Code. For our notebooks, Python 3.7 or newer is required (e.g., in a virtual environment).
+Moreover, they require some Python libraries, which can be installed by:
+
+```sh
+pip install -r requirements.txt
+```
\ No newline at end of file
diff --git a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java b/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
deleted file mode 100644
index 8c758c24444ea9c590c364063a397f9b7bfec8f9..0000000000000000000000000000000000000000
--- a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
+++ /dev/null
@@ -1,156 +0,0 @@
-package theodolite.commons.kafkastreams;
-
-import java.util.Objects;
-import java.util.Properties;
-import org.apache.kafka.streams.KafkaStreams;
-import org.apache.kafka.streams.StreamsConfig;
-import org.apache.kafka.streams.Topology;
-import titan.ccp.common.kafka.streams.PropertiesBuilder;
-
-/**
- * Builder for the Kafka Streams configuration.
- */
-public abstract class KafkaStreamsBuilder {
-
-  // Kafkastreams application specific
-  protected String schemaRegistryUrl; // NOPMD for use in subclass
-
-  private String applicationName; // NOPMD
-  private String applicationVersion; // NOPMD
-  private String bootstrapServers; // NOPMD
-  private int numThreads = -1; // NOPMD
-  private int commitIntervalMs = -1; // NOPMD
-  private int cacheMaxBytesBuff = -1; // NOPMD
-
-  /**
-   * Sets the application name for the {@code KafkaStreams} application. It is used to create the
-   * application ID.
-   *
-   * @param applicationName Name of the application.
-   * @return
-   */
-  public KafkaStreamsBuilder applicationName(final String applicationName) {
-    this.applicationName = applicationName;
-    return this;
-  }
-
-  /**
-   * Sets the application version for the {@code KafkaStreams} application. It is used to create the
-   * application ID.
-   *
-   * @param applicationVersion Version of the application.
-   * @return
-   */
-  public KafkaStreamsBuilder applicationVersion(final String applicationVersion) {
-    this.applicationVersion = applicationVersion;
-    return this;
-  }
-
-  /**
-   * Sets the bootstrap servers for the {@code KafkaStreams} application.
-   *
-   * @param bootstrapServers String for a bootstrap server.
-   * @return
-   */
-  public KafkaStreamsBuilder bootstrapServers(final String bootstrapServers) {
-    this.bootstrapServers = bootstrapServers;
-    return this;
-  }
-
-  /**
-   * Sets the URL for the schema registry.
-   *
-   * @param url The URL of the schema registry.
-   * @return
-   */
-  public KafkaStreamsBuilder schemaRegistry(final String url) {
-    this.schemaRegistryUrl = url;
-    return this;
-  }
-
-  /**
-   * Sets the Kafka Streams property for the number of threads (num.stream.threads). Can be minus
-   * one for using the default.
-   *
-   * @param numThreads Number of threads. -1 for using the default.
-   * @return
-   */
-  public KafkaStreamsBuilder numThreads(final int numThreads) {
-    if (numThreads < -1 || numThreads == 0) {
-      throw new IllegalArgumentException("Number of threads must be greater 0 or -1.");
-    }
-    this.numThreads = numThreads;
-    return this;
-  }
-
-  /**
-   * Sets the Kafka Streams property for the frequency with which to save the position (offsets in
-   * source topics) of tasks (commit.interval.ms). Must be zero for processing all record, for
-   * example, when processing bulks of records. Can be minus one for using the default.
-   *
-   * @param commitIntervalMs Frequency with which to save the position of tasks. In ms, -1 for using
-   *        the default.
-   * @return
-   */
-  public KafkaStreamsBuilder commitIntervalMs(final int commitIntervalMs) {
-    if (commitIntervalMs < -1) {
-      throw new IllegalArgumentException("Commit interval must be greater or equal -1.");
-    }
-    this.commitIntervalMs = commitIntervalMs;
-    return this;
-  }
-
-  /**
-   * Sets the Kafka Streams property for maximum number of memory bytes to be used for record caches
-   * across all threads (cache.max.bytes.buffering). Must be zero for processing all record, for
-   * example, when processing bulks of records. Can be minus one for using the default.
-   *
-   * @param cacheMaxBytesBuffering Number of memory bytes to be used for record caches across all
-   *        threads. -1 for using the default.
-   * @return
-   */
-  public KafkaStreamsBuilder cacheMaxBytesBuffering(final int cacheMaxBytesBuffering) {
-    if (cacheMaxBytesBuffering < -1) {
-      throw new IllegalArgumentException("Cache max bytes buffering must be greater or equal -1.");
-    }
-    this.cacheMaxBytesBuff = cacheMaxBytesBuffering;
-    return this;
-  }
-
-  /**
-   * Method to implement a {@link Topology} for a {@code KafkaStreams} application.
-   *
-   * @return A {@code Topology} for a {@code KafkaStreams} application.
-   */
-  protected abstract Topology buildTopology();
-
-  /**
-   * Build the {@link Properties} for a {@code KafkaStreams} application.
-   *
-   * @return A {@code Properties} object.
-   */
-  protected Properties buildProperties() {
-    return PropertiesBuilder
-        .bootstrapServers(this.bootstrapServers)
-        .applicationId(this.applicationName + '-' + this.applicationVersion)
-        .set(StreamsConfig.NUM_STREAM_THREADS_CONFIG, this.numThreads, p -> p > 0)
-        .set(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, this.commitIntervalMs, p -> p >= 0)
-        .set(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, this.cacheMaxBytesBuff, p -> p >= 0)
-        .build();
-  }
-
-  /**
-   * Builds the {@link KafkaStreams} instance.
-   */
-  public KafkaStreams build() {
-    // Check for required attributes for building properties.
-    Objects.requireNonNull(this.applicationName, "Application name has not been set.");
-    Objects.requireNonNull(this.applicationVersion, "Application version has not been set.");
-    Objects.requireNonNull(this.bootstrapServers, "Bootstrap server has not been set.");
-    Objects.requireNonNull(this.schemaRegistryUrl, "Schema registry has not been set.");
-
-    // Create the Kafka streams instance.
-    return new KafkaStreams(this.buildTopology(), this.buildProperties());
-  }
-
-}
diff --git a/application-kafkastreams-commons/src/test/java/.gitkeep b/application-kafkastreams-commons/src/test/java/.gitkeep
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/benchmarks/.gitlab-ci.yml b/benchmarks/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1a4d354701459d4730dab398e0210ab9189d7ad3
--- /dev/null
+++ b/benchmarks/.gitlab-ci.yml
@@ -0,0 +1,414 @@
+image: openjdk:11-jdk
+
+# Disable the Gradle daemon for Continuous Integration servers as correctness
+# is usually a priority over speed in CI environments. Using a fresh
+# runtime for each build is more reliable since the runtime is completely
+# isolated from any previous builds.
+variables:
+  GRADLE_OPTS: "-Dorg.gradle.daemon=false"
+
+cache:
+  paths:
+    - .gradle
+
+before_script:
+  - cd benchmarks
+  - export GRADLE_USER_HOME=`pwd`/.gradle
+
+stages:
+  - build
+  - test
+  - check
+  - deploy
+
+build:
+  stage: build
+  tags:
+    - exec-docker
+  script: ./gradlew --build-cache assemble
+  artifacts:
+    paths:
+      - "benchmarks/build/libs/*.jar"
+      - "benchmarks/*/build/distributions/*.tar"
+    expire_in: 1 day
+
+test:
+  stage: test
+  tags:
+    - exec-docker
+  script: ./gradlew test --continue
+  artifacts:
+    reports:
+      junit:
+        - "benchmarks/**/build/test-results/test/TEST-*.xml"
+
+checkstyle:
+  stage: check
+  tags:
+    - exec-docker
+  script: ./gradlew checkstyle --continue
+  artifacts:
+    paths:
+      - "benchmarks/*/build/reports/checkstyle/main.html"
+    when: on_failure
+    expire_in: 1 day
+
+pmd:
+  stage: check
+  tags:
+    - exec-docker
+  script: ./gradlew pmd --continue
+  artifacts:
+    paths:
+      - "benchmarks/*/build/reports/pmd/*.html"
+    when: on_failure
+    expire_in: 1 day
+
+spotbugs:
+  stage: check
+  tags:
+    - exec-docker
+  script: ./gradlew spotbugs --continue
+  artifacts:
+    paths:
+      - "benchmarks/*/build/reports/spotbugs/*.html"
+    when: on_failure
+    expire_in: 1 day
+
+
+.deploy:
+  stage: deploy
+  tags:
+    - exec-dind
+  # see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled
+  # for image usage and settings for building with TLS and docker in docker
+  image: docker:19.03.1
+  services:
+    - docker:19.03.1-dind
+  variables:
+    DOCKER_TLS_CERTDIR: "/certs"
+  script:
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    - docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest"
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
+    - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$CI_COMMIT_TAG"
+    - echo $DOCKERHUB_PW | docker login -u $DOCKERHUB_ID --password-stdin
+    - docker push $DOCKERHUB_ORG/$IMAGE_NAME
+    - docker logout
+  rules:
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      # - $JAVA_PROJECT_NAME/**/* # hope this can be simplified soon, see #51
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc1-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc1-kstreams-app"
+    JAVA_PROJECT_NAME: "uc1-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc1-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc2-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc2-kstreams-app"
+    JAVA_PROJECT_NAME: "uc2-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc2-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc3-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc3-kstreams-app"
+    JAVA_PROJECT_NAME: "uc3-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc3-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc4-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc4-kstreams-app"
+    JAVA_PROJECT_NAME: "uc4-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc4-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc1-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc1-workload-generator"
+    JAVA_PROJECT_NAME: "uc1-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc1-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc2-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc2-workload-generator"
+    JAVA_PROJECT_NAME: "uc2-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc2-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc3-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc3-workload-generator"
+    JAVA_PROJECT_NAME: "uc3-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc3-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc4-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc4-workload-generator"
+    JAVA_PROJECT_NAME: "uc4-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc4-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+.deploy-ghcr:
+  stage: deploy
+  tags:
+    - exec-dind
+  # see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled
+  # for image usage and settings for building with TLS and docker in docker
+  image: docker:19.03.1
+  services:
+    - docker:19.03.1-dind
+  variables:
+    DOCKER_TLS_CERTDIR: "/certs"
+  script:
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    - docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest"
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
+    - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME:$CI_COMMIT_TAG"
+    - echo $GITHUB_CR_TOKEN | docker login ghcr.io -u $GITHUB_CR_USER --password-stdin
+    - docker push ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME
+    - docker logout
+  rules:
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      # - $JAVA_PROJECT_NAME/**/* # hope this can be simplified soon, see #51
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc1-kstreams-app:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc1-kstreams-app"
+    JAVA_PROJECT_NAME: "uc1-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc1-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc2-kstreams-app:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc2-kstreams-app"
+    JAVA_PROJECT_NAME: "uc2-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc2-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc3-kstreams-app:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc3-kstreams-app"
+    JAVA_PROJECT_NAME: "uc3-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc3-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc4-kstreams-app:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc4-kstreams-app"
+    JAVA_PROJECT_NAME: "uc4-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc4-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc1-workload-generator:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc1-workload-generator"
+    JAVA_PROJECT_NAME: "uc1-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc1-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc2-workload-generator:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc2-workload-generator"
+    JAVA_PROJECT_NAME: "uc2-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc2-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc3-workload-generator:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc3-workload-generator"
+    JAVA_PROJECT_NAME: "uc3-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc3-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc4-workload-generator:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc4-workload-generator"
+    JAVA_PROJECT_NAME: "uc4-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc4-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
diff --git a/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from .settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/.settings/org.eclipse.jdt.ui.prefs
diff --git a/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from .settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from .settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/application-kafkastreams-commons/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/application-kafkastreams-commons/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from application-kafkastreams-commons/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/application-kafkastreams-commons/.settings/org.eclipse.jdt.ui.prefs
diff --git a/application-kafkastreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from application-kafkastreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/application-kafkastreams-commons/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from application-kafkastreams-commons/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/application-kafkastreams-commons/build.gradle b/benchmarks/application-kafkastreams-commons/build.gradle
similarity index 100%
rename from application-kafkastreams-commons/build.gradle
rename to benchmarks/application-kafkastreams-commons/build.gradle
diff --git a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java b/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
similarity index 85%
rename from application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
rename to benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
index 6302e4c69904aaf57e3f936ee9ad0ead11414a8d..ca1838b84a4f1b3ddf11ad4dea8e34792371974b 100644
--- a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
+++ b/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
@@ -9,12 +9,6 @@ public final class ConfigurationKeys {
 
   public static final String APPLICATION_VERSION = "application.version";
 
-  public static final String NUM_THREADS = "num.threads";
-
-  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
-
-  public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
-
   public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
 
   public static final String SCHEMA_REGISTRY_URL = "schema.registry.url";
diff --git a/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java b/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..ef1ece3549b1aabf60a4ff5b15028b7e50288cd9
--- /dev/null
+++ b/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
@@ -0,0 +1,123 @@
+package theodolite.commons.kafkastreams;
+
+import java.util.Properties;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import org.apache.commons.configuration2.Configuration;
+import org.apache.kafka.streams.KafkaStreams;
+import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.Topology;
+import titan.ccp.common.kafka.streams.PropertiesBuilder;
+
+/**
+ * Builder for the Kafka Streams configuration.
+ */
+public abstract class KafkaStreamsBuilder {
+
+  // Kafka Streams application specific
+  protected final String schemaRegistryUrl; // NOPMD for use in subclass
+  protected final String inputTopic; // NOPMD for use in subclass
+
+  private final Configuration config;
+
+  private final String applicationName; // NOPMD
+  private final String applicationVersion; // NOPMD
+  private final String bootstrapServers; // NOPMD
+
+  /**
+   * Construct a new Builder object for a Kafka Streams application.
+   *
+   * @param config Contains the key value pairs for configuration.
+   */
+  public KafkaStreamsBuilder(final Configuration config) {
+    this.config = config;
+    this.applicationName = this.config.getString(ConfigurationKeys.APPLICATION_NAME);
+    this.applicationVersion = this.config.getString(ConfigurationKeys.APPLICATION_VERSION);
+    this.bootstrapServers = this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS);
+    this.schemaRegistryUrl = this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL);
+    this.inputTopic = this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC);
+  }
+
+  /**
+   * Checks if the given key is contained in the configurations and sets it in the properties.
+   *
+   * @param <T> Type of the value for given key
+   * @param propBuilder Object where to set this property.
+   * @param key The key to check and set the property.
+   * @param valueGetter Method to get the value from with given key.
+   * @param condition for setting the property.
+   */
+  private <T> void setOptionalProperty(final PropertiesBuilder propBuilder,
+      final String key,
+      final Function<String, T> valueGetter,
+      final Predicate<T> condition) {
+    if (this.config.containsKey(key)) {
+      final T value = valueGetter.apply(key);
+      propBuilder.set(key, value, condition);
+    }
+  }
+
+  /**
+   * Build the {@link Properties} for a {@code KafkaStreams} application.
+   *
+   * @return A {@code Properties} object.
+   */
+  protected Properties buildProperties() {
+    // required configuration
+    final PropertiesBuilder propBuilder = PropertiesBuilder
+        .bootstrapServers(this.bootstrapServers)
+        .applicationId(this.applicationName + '-' + this.applicationVersion);
+
+    // optional configurations
+    this.setOptionalProperty(propBuilder, StreamsConfig.ACCEPTABLE_RECOVERY_LAG_CONFIG,
+        this.config::getLong,
+        p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.BUFFERED_RECORDS_PER_PARTITION_CONFIG,
+        this.config::getInt, p -> p > 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG,
+        this.config::getInt,
+        p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.COMMIT_INTERVAL_MS_CONFIG,
+        this.config::getInt, p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.MAX_TASK_IDLE_MS_CONFIG,
+        this.config::getLong,
+        p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.MAX_WARMUP_REPLICAS_CONFIG,
+        this.config::getInt, p -> p >= 1);
+    this.setOptionalProperty(propBuilder, StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG,
+        this.config::getInt, p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.NUM_STREAM_THREADS_CONFIG,
+        this.config::getInt, p -> p > 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.POLL_MS_CONFIG,
+        this.config::getLong,
+        p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.PROCESSING_GUARANTEE_CONFIG,
+        this.config::getString, p -> StreamsConfig.AT_LEAST_ONCE.equals(p)
+            || StreamsConfig.EXACTLY_ONCE.equals(p) || StreamsConfig.EXACTLY_ONCE_BETA.equals(p));
+    this.setOptionalProperty(propBuilder, StreamsConfig.REPLICATION_FACTOR_CONFIG,
+        this.config::getInt, p -> p >= 0);
+
+    if (this.config.containsKey(StreamsConfig.TOPOLOGY_OPTIMIZATION)
+        && this.config.getBoolean(StreamsConfig.TOPOLOGY_OPTIMIZATION)) {
+      propBuilder.set(StreamsConfig.TOPOLOGY_OPTIMIZATION, StreamsConfig.OPTIMIZE);
+    }
+
+    return propBuilder.build();
+  }
+
+  /**
+   * Method to implement a {@link Topology} for a {@code KafkaStreams} application.
+   *
+   * @return A {@code Topology} for a {@code KafkaStreams} application.
+   */
+  protected abstract Topology buildTopology();
+
+  /**
+   * Builds the {@link KafkaStreams} instance.
+   */
+  public KafkaStreams build() {
+    // Create the Kafka streams instance.
+    return new KafkaStreams(this.buildTopology(), this.buildProperties());
+  }
+
+}
diff --git a/build.gradle b/benchmarks/build.gradle
similarity index 91%
rename from build.gradle
rename to benchmarks/build.gradle
index 1e388cb9665b43e004a1854248acc04e1cda387c..3cb86b68e9d37c53572c6611fad1057b5505e9cc 100644
--- a/build.gradle
+++ b/benchmarks/build.gradle
@@ -6,7 +6,7 @@ buildscript {
     }
   }
   dependencies {
-    classpath "gradle.plugin.com.github.spotbugs:spotbugs-gradle-plugin:1.6.3"
+    classpath "gradle.plugin.com.github.spotbugs.snom:spotbugs-gradle-plugin:4.6.0"
   }
 }
 
@@ -65,6 +65,7 @@ configure(useCaseApplications) {
       implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
       implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
       implementation 'org.apache.kafka:kafka-streams:2.6.0' // enable TransformerSuppliers
+      implementation 'com.google.code.gson:gson:2.8.2'
       implementation 'com.google.guava:guava:24.1-jre'
       implementation 'org.jctools:jctools-core:2.1.1'
       implementation 'org.slf4j:slf4j-simple:1.7.25'
@@ -100,6 +101,7 @@ configure(commonProjects) {
       implementation 'org.slf4j:slf4j-simple:1.7.25'
       implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
       implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
+      implementation 'org.apache.kafka:kafka-streams:2.6.0'
 
       // Use JUnit test framework
       testImplementation 'junit:junit:4.12'
@@ -108,7 +110,7 @@ configure(commonProjects) {
 
 // Per default XML reports for SpotBugs are generated
 // Include this to generate HTML reports
-tasks.withType(com.github.spotbugs.SpotBugsTask) {
+tasks.withType(com.github.spotbugs.snom.SpotBugsTask) {
   reports {
     // Either HTML or XML reports can be activated
     html.enabled true
@@ -165,7 +167,7 @@ subprojects {
     reportLevel = "low"
     effort = "max"
     ignoreFailures = false
-    toolVersion = '3.1.7'
+    toolVersion = '4.1.4'
   }
 }
 
diff --git a/config/README.md b/benchmarks/config/README.md
similarity index 100%
rename from config/README.md
rename to benchmarks/config/README.md
diff --git a/config/checkstyle-suppression.xml b/benchmarks/config/checkstyle-suppression.xml
similarity index 100%
rename from config/checkstyle-suppression.xml
rename to benchmarks/config/checkstyle-suppression.xml
diff --git a/config/checkstyle.xml b/benchmarks/config/checkstyle.xml
similarity index 100%
rename from config/checkstyle.xml
rename to benchmarks/config/checkstyle.xml
diff --git a/config/eclipse-cleanup.xml b/benchmarks/config/eclipse-cleanup.xml
similarity index 100%
rename from config/eclipse-cleanup.xml
rename to benchmarks/config/eclipse-cleanup.xml
diff --git a/config/eclipse-formatter.xml b/benchmarks/config/eclipse-formatter.xml
similarity index 100%
rename from config/eclipse-formatter.xml
rename to benchmarks/config/eclipse-formatter.xml
diff --git a/config/eclipse-import-order.importorder b/benchmarks/config/eclipse-import-order.importorder
similarity index 100%
rename from config/eclipse-import-order.importorder
rename to benchmarks/config/eclipse-import-order.importorder
diff --git a/config/pmd.xml b/benchmarks/config/pmd.xml
similarity index 100%
rename from config/pmd.xml
rename to benchmarks/config/pmd.xml
diff --git a/config/spotbugs-exclude-filter.xml b/benchmarks/config/spotbugs-exclude-filter.xml
similarity index 100%
rename from config/spotbugs-exclude-filter.xml
rename to benchmarks/config/spotbugs-exclude-filter.xml
diff --git a/benchmarks/gradle/wrapper/gradle-wrapper.jar b/benchmarks/gradle/wrapper/gradle-wrapper.jar
new file mode 100644
index 0000000000000000000000000000000000000000..457aad0d98108420a977756b7145c93c8910b076
Binary files /dev/null and b/benchmarks/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/gradle/wrapper/gradle-wrapper.properties b/benchmarks/gradle/wrapper/gradle-wrapper.properties
similarity index 91%
rename from gradle/wrapper/gradle-wrapper.properties
rename to benchmarks/gradle/wrapper/gradle-wrapper.properties
index e0b3fb8d70b1bbf790f6f8ed1c928ddf09f54628..4d9ca1649142b0c20144adce78e2472e2da01c30 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/benchmarks/gradle/wrapper/gradle-wrapper.properties
@@ -1,5 +1,5 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.2-bin.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-6.7.1-bin.zip
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
diff --git a/gradlew b/benchmarks/gradlew
similarity index 99%
rename from gradlew
rename to benchmarks/gradlew
index cccdd3d517fc5249beaefa600691cf150f2fa3e6..af6708ff229fda75da4f7cc4da4747217bac4d53 100755
--- a/gradlew
+++ b/benchmarks/gradlew
@@ -28,7 +28,7 @@ APP_NAME="Gradle"
 APP_BASE_NAME=`basename "$0"`
 
 # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
-DEFAULT_JVM_OPTS=""
+DEFAULT_JVM_OPTS='"-Xmx64m"'
 
 # Use the maximum available, or set MAX_FD != -1 to use that value.
 MAX_FD="maximum"
diff --git a/gradlew.bat b/benchmarks/gradlew.bat
similarity index 94%
rename from gradlew.bat
rename to benchmarks/gradlew.bat
index e95643d6a2ca62258464e83c72f5156dc941c609..0f8d5937c4ad18feb44a19e55ad1e37cc159260f 100644
--- a/gradlew.bat
+++ b/benchmarks/gradlew.bat
@@ -14,7 +14,7 @@ set APP_BASE_NAME=%~n0
 set APP_HOME=%DIRNAME%
 
 @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
-set DEFAULT_JVM_OPTS=
+set DEFAULT_JVM_OPTS="-Xmx64m"
 
 @rem Find java.exe
 if defined JAVA_HOME goto findJavaFromJavaHome
diff --git a/settings.gradle b/benchmarks/settings.gradle
similarity index 100%
rename from settings.gradle
rename to benchmarks/settings.gradle
diff --git a/uc1-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc1-application/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc1-application/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc1-application/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc1-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc1-application/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc1-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc1-application/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc1-application/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc1-application/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc1-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc1-application/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc1-application/Dockerfile b/benchmarks/uc1-application/Dockerfile
similarity index 100%
rename from uc1-application/Dockerfile
rename to benchmarks/uc1-application/Dockerfile
diff --git a/uc1-application/build.gradle b/benchmarks/uc1-application/build.gradle
similarity index 100%
rename from uc1-application/build.gradle
rename to benchmarks/uc1-application/build.gradle
diff --git a/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java b/benchmarks/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
similarity index 56%
rename from uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
rename to benchmarks/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
index a35cc37b36fb906e5c5495006126374d4de4656c..f0d8062a2442181507c0bef990b73e0e9cf4a372 100644
--- a/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
+++ b/benchmarks/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
@@ -3,7 +3,6 @@ package theodolite.uc1.application;
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
-import theodolite.commons.kafkastreams.ConfigurationKeys;
 import theodolite.uc1.streamprocessing.Uc1KafkaStreamsBuilder;
 import titan.ccp.common.configuration.ServiceConfigurations;
 
@@ -31,18 +30,9 @@ public class HistoryService {
    */
   private void createKafkaStreamsApplication() {
 
-    final Uc1KafkaStreamsBuilder uc1KafkaStreamsBuilder = new Uc1KafkaStreamsBuilder();
-    uc1KafkaStreamsBuilder.inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC));
-
-    final KafkaStreams kafkaStreams = uc1KafkaStreamsBuilder
-        .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
-        .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
-        .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
-        .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
-        .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
-        .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
-        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
-        .build();
+    final Uc1KafkaStreamsBuilder uc1KafkaStreamsBuilder = new Uc1KafkaStreamsBuilder(this.config);
+
+    final KafkaStreams kafkaStreams = uc1KafkaStreamsBuilder.build();
 
     this.stopEvent.thenRun(kafkaStreams::close);
 
diff --git a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java b/benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
similarity index 100%
rename from uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
rename to benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
diff --git a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java b/benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
similarity index 80%
rename from uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
rename to benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
index 7699ecb48369a2041777b901931c46072a10d99f..14335282863bff5a170716b228ea363e3d739685 100644
--- a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
+++ b/benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
@@ -1,6 +1,7 @@
 package theodolite.uc1.streamprocessing;
 
 import java.util.Objects;
+import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
 import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
@@ -9,11 +10,9 @@ import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
  * Builder for the Kafka Streams configuration.
  */
 public class Uc1KafkaStreamsBuilder extends KafkaStreamsBuilder {
-  private String inputTopic; // NOPMD
 
-  public KafkaStreamsBuilder inputTopic(final String inputTopic) {
-    this.inputTopic = inputTopic;
-    return this;
+  public Uc1KafkaStreamsBuilder(final Configuration config) {
+    super(config);
   }
 
   @Override
diff --git a/uc1-application/src/main/resources/META-INF/application.properties b/benchmarks/uc1-application/src/main/resources/META-INF/application.properties
similarity index 65%
rename from uc1-application/src/main/resources/META-INF/application.properties
rename to benchmarks/uc1-application/src/main/resources/META-INF/application.properties
index 3fb301516daa4c7e14875d3d9ca9df9c770eb69e..b46e6246e248cc524c5b6249348c76ded6ec468b 100644
--- a/uc1-application/src/main/resources/META-INF/application.properties
+++ b/benchmarks/uc1-application/src/main/resources/META-INF/application.properties
@@ -3,10 +3,6 @@ application.version=0.0.1
 
 kafka.bootstrap.servers=localhost:9092
 kafka.input.topic=input
-kafka.output.topic=output
 
 schema.registry.url=http://localhost:8091
 
-num.threads=1
-commit.interval.ms=100
-cache.max.bytes.buffering=-1
diff --git a/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc1-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc1-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc1-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc1-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc1-workload-generator/Dockerfile b/benchmarks/uc1-workload-generator/Dockerfile
similarity index 100%
rename from uc1-workload-generator/Dockerfile
rename to benchmarks/uc1-workload-generator/Dockerfile
diff --git a/uc1-workload-generator/build.gradle b/benchmarks/uc1-workload-generator/build.gradle
similarity index 100%
rename from uc1-workload-generator/build.gradle
rename to benchmarks/uc1-workload-generator/build.gradle
diff --git a/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java b/benchmarks/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
rename to benchmarks/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
diff --git a/uc2-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc2-application/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc2-application/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc2-application/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc2-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc2-application/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc2-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc2-application/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc2-application/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc2-application/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc2-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc2-application/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc2-application/Dockerfile b/benchmarks/uc2-application/Dockerfile
similarity index 100%
rename from uc2-application/Dockerfile
rename to benchmarks/uc2-application/Dockerfile
diff --git a/uc2-application/README.md b/benchmarks/uc2-application/README.md
similarity index 100%
rename from uc2-application/README.md
rename to benchmarks/uc2-application/README.md
diff --git a/uc2-application/build.gradle b/benchmarks/uc2-application/build.gradle
similarity index 100%
rename from uc2-application/build.gradle
rename to benchmarks/uc2-application/build.gradle
diff --git a/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
similarity index 67%
rename from uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
index c094adfcd7952e81115dae84ed9c0d371e380c98..2f828278f5a3033c3e479bf82f3c8c5d9d4c380c 100644
--- a/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
+++ b/benchmarks/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
@@ -36,26 +36,15 @@ public class AggregationService {
    * @param clusterSession the database session which the application should use.
    */
   private void createKafkaStreamsApplication() {
-    // Use case specific stream configuration
-    final Uc2KafkaStreamsBuilder uc2KafkaStreamsBuilder = new Uc2KafkaStreamsBuilder();
+    final Uc2KafkaStreamsBuilder uc2KafkaStreamsBuilder = new Uc2KafkaStreamsBuilder(this.config);
     uc2KafkaStreamsBuilder
-        .inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC))
         .feedbackTopic(this.config.getString(ConfigurationKeys.KAFKA_FEEDBACK_TOPIC))
         .outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC))
         .configurationTopic(this.config.getString(ConfigurationKeys.KAFKA_CONFIGURATION_TOPIC))
         .emitPeriod(Duration.ofMillis(this.config.getLong(ConfigurationKeys.EMIT_PERIOD_MS)))
         .gracePeriod(Duration.ofMillis(this.config.getLong(ConfigurationKeys.GRACE_PERIOD_MS)));
 
-    // Configuration of the stream application
-    final KafkaStreams kafkaStreams = uc2KafkaStreamsBuilder
-        .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
-        .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
-        .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
-        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
-        .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
-        .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
-        .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
-        .build();
+    final KafkaStreams kafkaStreams = uc2KafkaStreamsBuilder.build();
 
     this.stopEvent.thenRun(kafkaStreams::close);
     kafkaStreams.start();
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
similarity index 93%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
index 16addb8510eec2254d4787edbfbfbe186996fdea..1a606ee3df5e6ac2f43b650afe4a9aed036df9cd 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
+++ b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
@@ -2,6 +2,7 @@ package theodolite.uc2.streamprocessing;
 
 import java.time.Duration;
 import java.util.Objects;
+import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
 import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
@@ -14,16 +15,14 @@ public class Uc2KafkaStreamsBuilder extends KafkaStreamsBuilder { // NOPMD build
   private static final Duration EMIT_PERIOD_DEFAULT = Duration.ofSeconds(1);
   private static final Duration GRACE_PERIOD_DEFAULT = Duration.ZERO;
 
-  private String inputTopic; // NOPMD
   private String feedbackTopic; // NOPMD
   private String outputTopic; // NOPMD
   private String configurationTopic; // NOPMD
   private Duration emitPeriod; // NOPMD
   private Duration gracePeriod; // NOPMD
 
-  public Uc2KafkaStreamsBuilder inputTopic(final String inputTopic) {
-    this.inputTopic = inputTopic;
-    return this;
+  public Uc2KafkaStreamsBuilder(final Configuration config) {
+    super(config);
   }
 
   public Uc2KafkaStreamsBuilder feedbackTopic(final String feedbackTopic) {
diff --git a/uc2-application/src/main/resources/META-INF/application.properties b/benchmarks/uc2-application/src/main/resources/META-INF/application.properties
similarity index 78%
rename from uc2-application/src/main/resources/META-INF/application.properties
rename to benchmarks/uc2-application/src/main/resources/META-INF/application.properties
index 10c47960adb012ba5c572e3833a37d821189eb8e..8f1af5f590eff7f2b12706d61a7c89d9152f7949 100644
--- a/uc2-application/src/main/resources/META-INF/application.properties
+++ b/benchmarks/uc2-application/src/main/resources/META-INF/application.properties
@@ -10,8 +10,4 @@ kafka.output.topic=output
 schema.registry.url=http://localhost:8091
 
 emit.period.ms=5000
-grace.period.ms=0
-
-num.threads=1
-commit.interval.ms=100
-cache.max.bytes.buffering=-1
+grace.period.ms=0
\ No newline at end of file
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java
diff --git a/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc2-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc2-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc2-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc2-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc2-workload-generator/Dockerfile b/benchmarks/uc2-workload-generator/Dockerfile
similarity index 100%
rename from uc2-workload-generator/Dockerfile
rename to benchmarks/uc2-workload-generator/Dockerfile
diff --git a/uc2-workload-generator/build.gradle b/benchmarks/uc2-workload-generator/build.gradle
similarity index 100%
rename from uc2-workload-generator/build.gradle
rename to benchmarks/uc2-workload-generator/build.gradle
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java b/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
similarity index 100%
rename from uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
rename to benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java b/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
rename to benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java b/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java
similarity index 100%
rename from uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java
rename to benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java
diff --git a/uc2-workload-generator/src/main/resources/META-INF/application.properties b/benchmarks/uc2-workload-generator/src/main/resources/META-INF/application.properties
similarity index 100%
rename from uc2-workload-generator/src/main/resources/META-INF/application.properties
rename to benchmarks/uc2-workload-generator/src/main/resources/META-INF/application.properties
diff --git a/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java b/benchmarks/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java
similarity index 100%
rename from uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java
rename to benchmarks/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java
diff --git a/uc3-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc3-application/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc3-application/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc3-application/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc3-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc3-application/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc3-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc3-application/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc3-application/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc3-application/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc3-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc3-application/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc3-application/Dockerfile b/benchmarks/uc3-application/Dockerfile
similarity index 100%
rename from uc3-application/Dockerfile
rename to benchmarks/uc3-application/Dockerfile
diff --git a/uc3-application/build.gradle b/benchmarks/uc3-application/build.gradle
similarity index 100%
rename from uc3-application/build.gradle
rename to benchmarks/uc3-application/build.gradle
diff --git a/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
similarity index 63%
rename from uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
index b245b1645c9e5ee68df3f108802c9b91d70cf017..349512f988bb182d8851e458a1bce244c756bbfe 100644
--- a/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
+++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
@@ -34,23 +34,13 @@ public class HistoryService {
    *
    */
   private void createKafkaStreamsApplication() {
-    // Use case specific stream configuration
-    final Uc3KafkaStreamsBuilder uc3KafkaStreamsBuilder = new Uc3KafkaStreamsBuilder();
+    final Uc3KafkaStreamsBuilder uc3KafkaStreamsBuilder = new Uc3KafkaStreamsBuilder(this.config);
     uc3KafkaStreamsBuilder
-        .inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC))
         .outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC))
         .windowDuration(Duration.ofMinutes(this.windowDurationMinutes));
 
-    // Configuration of the stream application
-    final KafkaStreams kafkaStreams = uc3KafkaStreamsBuilder
-        .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
-        .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
-        .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
-        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
-        .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
-        .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
-        .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
-        .build();
+    final KafkaStreams kafkaStreams = uc3KafkaStreamsBuilder.build();
+
     this.stopEvent.thenRun(kafkaStreams::close);
     kafkaStreams.start();
   }
diff --git a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
similarity index 100%
rename from uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
diff --git a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
similarity index 88%
rename from uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
index e74adf7c87673cc0e6ea4004dbcb1c0a6fc907ac..9ab4ea0a96c663af09008bd5358066ca3f8520ac 100644
--- a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
+++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
@@ -2,6 +2,7 @@ package theodolite.uc3.streamprocessing;
 
 import java.time.Duration;
 import java.util.Objects;
+import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
 import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
@@ -11,13 +12,11 @@ import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
  */
 public class Uc3KafkaStreamsBuilder extends KafkaStreamsBuilder {
 
-  private String inputTopic; // NOPMD
   private String outputTopic; // NOPMD
   private Duration windowDuration; // NOPMD
 
-  public Uc3KafkaStreamsBuilder inputTopic(final String inputTopic) {
-    this.inputTopic = inputTopic;
-    return this;
+  public Uc3KafkaStreamsBuilder(final Configuration config) {
+    super(config);
   }
 
   public Uc3KafkaStreamsBuilder outputTopic(final String outputTopic) {
diff --git a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java
similarity index 100%
rename from uc3-application/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java
rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java
diff --git a/uc3-application/src/main/resources/META-INF/application.properties b/benchmarks/uc3-application/src/main/resources/META-INF/application.properties
similarity index 77%
rename from uc3-application/src/main/resources/META-INF/application.properties
rename to benchmarks/uc3-application/src/main/resources/META-INF/application.properties
index 2ceaf37224b0bff54b09beaabe29210216e11671..011406f7ef1e23647eeae150d349f472214cbcd4 100644
--- a/uc3-application/src/main/resources/META-INF/application.properties
+++ b/benchmarks/uc3-application/src/main/resources/META-INF/application.properties
@@ -7,7 +7,3 @@ kafka.output.topic=output
 kafka.window.duration.minutes=1
 
 schema.registry.url=http://localhost:8091
-
-num.threads=1
-commit.interval.ms=100
-cache.max.bytes.buffering=-1
diff --git a/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc3-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc3-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc3-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc3-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc3-workload-generator/Dockerfile b/benchmarks/uc3-workload-generator/Dockerfile
similarity index 100%
rename from uc3-workload-generator/Dockerfile
rename to benchmarks/uc3-workload-generator/Dockerfile
diff --git a/uc3-workload-generator/build.gradle b/benchmarks/uc3-workload-generator/build.gradle
similarity index 100%
rename from uc3-workload-generator/build.gradle
rename to benchmarks/uc3-workload-generator/build.gradle
diff --git a/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java b/benchmarks/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
rename to benchmarks/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
diff --git a/uc4-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc4-application/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc4-application/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc4-application/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc4-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc4-application/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc4-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc4-application/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc4-application/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc4-application/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc4-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc4-application/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc4-application/Dockerfile b/benchmarks/uc4-application/Dockerfile
similarity index 100%
rename from uc4-application/Dockerfile
rename to benchmarks/uc4-application/Dockerfile
diff --git a/uc4-application/build.gradle b/benchmarks/uc4-application/build.gradle
similarity index 100%
rename from uc4-application/build.gradle
rename to benchmarks/uc4-application/build.gradle
diff --git a/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
similarity index 67%
rename from uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
index 23af805733de2bb3f6384fa924a2322490ee58d9..12f35e8dcc532b19e470722094ba5aff07420ad2 100644
--- a/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
+++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
@@ -32,9 +32,8 @@ public class HistoryService {
    */
   private void createKafkaStreamsApplication() {
     // Use case specific stream configuration
-    final Uc4KafkaStreamsBuilder uc4KafkaStreamsBuilder = new Uc4KafkaStreamsBuilder();
+    final Uc4KafkaStreamsBuilder uc4KafkaStreamsBuilder = new Uc4KafkaStreamsBuilder(this.config);
     uc4KafkaStreamsBuilder
-        .inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC))
         .outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC))
         .aggregtionDuration(
             Duration.ofDays(this.config.getInt(ConfigurationKeys.AGGREGATION_DURATION_DAYS)))
@@ -42,15 +41,7 @@ public class HistoryService {
             Duration.ofDays(this.config.getInt(ConfigurationKeys.AGGREGATION_ADVANCE_DAYS)));
 
     // Configuration of the stream application
-    final KafkaStreams kafkaStreams = uc4KafkaStreamsBuilder
-        .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
-        .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
-        .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
-        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
-        .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
-        .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
-        .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
-        .build();
+    final KafkaStreams kafkaStreams = uc4KafkaStreamsBuilder.build();
 
     this.stopEvent.thenRun(kafkaStreams::close);
     kafkaStreams.start();
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
similarity index 91%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
index 7c9e2c4f790cf1fbb7dd34db573576d1e64077db..bbbb043119857612b1a8b0c60e3a5466cd68447e 100644
--- a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
+++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
@@ -2,6 +2,7 @@ package theodolite.uc4.streamprocessing;
 
 import java.time.Duration;
 import java.util.Objects;
+import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
 import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
@@ -11,14 +12,12 @@ import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
  */
 public class Uc4KafkaStreamsBuilder extends KafkaStreamsBuilder {
 
-  private String inputTopic; // NOPMD
   private String outputTopic; // NOPMD
   private Duration aggregtionDuration; // NOPMD
   private Duration aggregationAdvance; // NOPMD
 
-  public Uc4KafkaStreamsBuilder inputTopic(final String inputTopic) {
-    this.inputTopic = inputTopic;
-    return this;
+  public Uc4KafkaStreamsBuilder(final Configuration config) {
+    super(config);
   }
 
   public Uc4KafkaStreamsBuilder outputTopic(final String outputTopic) {
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java
diff --git a/uc4-application/src/main/resources/META-INF/application.properties b/benchmarks/uc4-application/src/main/resources/META-INF/application.properties
similarity index 79%
rename from uc4-application/src/main/resources/META-INF/application.properties
rename to benchmarks/uc4-application/src/main/resources/META-INF/application.properties
index e577c880a8ff8169699acb8598e323b8671e8d5e..b46681533e63bf86a51439778a46940da348559d 100644
--- a/uc4-application/src/main/resources/META-INF/application.properties
+++ b/benchmarks/uc4-application/src/main/resources/META-INF/application.properties
@@ -8,7 +8,3 @@ aggregation.duration.days=30
 aggregation.advance.days=1
 
 schema.registry.url=http://localhost:8091
-
-num.threads=1
-commit.interval.ms=100
-cache.max.bytes.buffering=-1
diff --git a/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc4-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc4-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc4-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc4-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc4-workload-generator/Dockerfile b/benchmarks/uc4-workload-generator/Dockerfile
similarity index 100%
rename from uc4-workload-generator/Dockerfile
rename to benchmarks/uc4-workload-generator/Dockerfile
diff --git a/uc4-workload-generator/build.gradle b/benchmarks/uc4-workload-generator/build.gradle
similarity index 100%
rename from uc4-workload-generator/build.gradle
rename to benchmarks/uc4-workload-generator/build.gradle
diff --git a/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java b/benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
rename to benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
diff --git a/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
diff --git a/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/workload-generator-commons/build.gradle b/benchmarks/workload-generator-commons/build.gradle
similarity index 100%
rename from workload-generator-commons/build.gradle
rename to benchmarks/workload-generator-commons/build.gradle
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java
diff --git a/docker-test/uc1-docker-compose/docker-compose.yml b/docker-test/uc1-docker-compose/docker-compose.yml
index d394255951151d931b73e4c923bb10ecaed66a2c..905e6e30bfd38900e896be45d8a4b15389b2f54f 100755
--- a/docker-test/uc1-docker-compose/docker-compose.yml
+++ b/docker-test/uc1-docker-compose/docker-compose.yml
@@ -1,26 +1,53 @@
 version: '2'
 services:
   zookeeper:
-    image: wurstmeister/zookeeper
-    ports:
-      - "2181:2181"
+    image: confluentinc/cp-zookeeper
+    expose:
+      - "2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
     image: wurstmeister/kafka
-    ports:
-      - "9092:9092"
     expose:
       - "9092"
+    #ports:
+    #  - 19092:19092
     environment:
-      KAFKA_ADVERTISED_HOST_NAME: kafka #172.17.0.1 # Replace with docker network
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
-      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
+  schema-registry:
+    image: confluentinc/cp-schema-registry:5.3.1
+    depends_on:
+      - zookeeper
+      - kafka
+    expose:
+      - "8081"
+    ports:
+      - 8081:8081
+    environment:
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry
+      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
   uc-app:
     image: theodolite/theodolite-uc1-kstreams-app:latest
+    depends_on:
+      - schema-registry
+      - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
   uc-wg: 
     image: theodolite/theodolite-uc1-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+      - zookeeper
     environment:
+      ZK_HOST: zookeeper
+      ZK_PORT: 2181
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      INSTANCES: 1
       NUM_SENSORS: 1
diff --git a/docker-test/uc2-docker-compose/docker-compose.yml b/docker-test/uc2-docker-compose/docker-compose.yml
index f730148a89d41a819d81a4770e0d53a960dbe493..e6511bfd9fa7ea1e62bf9f3787ac6f3c0acc0107 100755
--- a/docker-test/uc2-docker-compose/docker-compose.yml
+++ b/docker-test/uc2-docker-compose/docker-compose.yml
@@ -1,26 +1,53 @@
 version: '2'
 services:
   zookeeper:
-    image: wurstmeister/zookeeper
-    ports:
-      - "2181:2181"
+    image: confluentinc/cp-zookeeper
+    expose:
+      - "2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
     image: wurstmeister/kafka
-    ports:
-      - "9092:9092"
     expose:
       - "9092"
+    ports:
+      - 19092:19092
     environment:
-      KAFKA_ADVERTISED_HOST_NAME: kafka #172.17.0.1 # Replace with docker network
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
-      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
+  schema-registry:
+    image: confluentinc/cp-schema-registry:5.3.1
+    depends_on:
+      - zookeeper
+      - kafka
+    expose:
+      - "8081"
+    ports:
+      - 8081:8081
+    environment:
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry
+      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
   uc-app:
     image: theodolite/theodolite-uc2-kstreams-app:latest
+    depends_on:
+      - schema-registry
+      - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
   uc-wg: 
     image: theodolite/theodolite-uc2-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+      - zookeeper
     environment:
+      ZK_HOST: zookeeper
+      ZK_PORT: 2181
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      INSTANCES: 1
       NUM_SENSORS: 1
\ No newline at end of file
diff --git a/docker-test/uc3-docker-compose/docker-compose.yml b/docker-test/uc3-docker-compose/docker-compose.yml
index 2a3cb23a79f9edda699fe1bb07c1b922614aeb13..9d2da8e87621c1902ff101efd42ff52436416b77 100755
--- a/docker-test/uc3-docker-compose/docker-compose.yml
+++ b/docker-test/uc3-docker-compose/docker-compose.yml
@@ -1,27 +1,58 @@
 version: '2'
 services:
   zookeeper:
-    image: wurstmeister/zookeeper
+    #image: wurstmeister/zookeeper
+    image: confluentinc/cp-zookeeper
     ports:
       - "2181:2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
     image: wurstmeister/kafka
     ports:
       - "9092:9092"
     expose:
       - "9092"
+    ports:
+      - 19092:19092
     environment:
-      KAFKA_ADVERTISED_HOST_NAME: kafka #172.17.0.1 # Replace with docker network
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
-      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
+  schema-registry:
+    image: confluentinc/cp-schema-registry:5.3.1
+    depends_on:
+      - zookeeper
+      - kafka
+    ports:
+      - "8081:8081"
+    expose:
+      - "8081"
+    environment:
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry
+      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
   uc-app:
     image: theodolite/theodolite-uc3-kstreams-app:latest
+    depends_on:
+      - schema-registry
+      - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
       KAFKA_WINDOW_DURATION_MINUTES: 60
   uc-wg: 
     image: theodolite/theodolite-uc3-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+      - zookeeper
     environment:
+      ZK_HOST: zookeeper
+      ZK_PORT: 2181
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      INSTANCES: 1
       NUM_SENSORS: 1
\ No newline at end of file
diff --git a/docker-test/uc4-docker-compose/docker-compose.yml b/docker-test/uc4-docker-compose/docker-compose.yml
index 1f015f23b2e8b98eba27ae6f387adb123ae2ccc2..530852b2df5ef2c70f03a11ac2445ce587a3760f 100755
--- a/docker-test/uc4-docker-compose/docker-compose.yml
+++ b/docker-test/uc4-docker-compose/docker-compose.yml
@@ -1,25 +1,32 @@
 version: '2'
 services:
   zookeeper:
-    image: wurstmeister/zookeeper
+    #image: wurstmeister/zookeeper
+    image: confluentinc/cp-zookeeper
     ports:
       - "2181:2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
     image: wurstmeister/kafka
-    ports:
-      - "9092:9092"
     expose:
       - "9092"
+    ports:
+      - 19092:19092
     environment:
-      KAFKA_ADVERTISED_HOST_NAME: kafka #172.17.0.1 # Replace with docker network
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
-      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
   schema-registry:
     image: confluentinc/cp-schema-registry:5.3.1
     depends_on:
       - zookeeper
       - kafka
+    ports:
+      - "8081:8081"
     expose:
       - "8081"
     environment:
@@ -27,10 +34,22 @@ services:
       SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
   uc-app:
     image: theodolite/theodolite-uc4-kstreams-app:latest
+    depends_on:
+      - schema-registry
+      - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       SCHEMA_REGISTRY_URL: http://schema-registry:8081
   uc-wg: 
     image: theodolite/theodolite-uc4-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+      - zookeeper
     environment:
+      ZK_HOST: zookeeper
+      ZK_PORT: 2181
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      INSTANCES: 1
+      NUM_SENSORS: 100
diff --git a/execution/.gitlab-ci.yml b/execution/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c1a58f4bb9fd0adfe4cb5c22881c78a72cd13d14
--- /dev/null
+++ b/execution/.gitlab-ci.yml
@@ -0,0 +1,61 @@
+stages:
+  - deploy
+
+.deploy:
+  stage: deploy
+  tags:
+    - exec-dind
+  image: docker:19.03.1
+  services:
+    - docker:19.03.1-dind
+  variables:
+    DOCKER_TLS_CERTDIR: "/certs"
+  script:
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    - docker build --pull -t theodolite ./execution
+    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite $DOCKERHUB_ORG/theodolite:${DOCKER_TAG_NAME}latest"
+    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite $DOCKERHUB_ORG/theodolite:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
+    - "[ $CI_COMMIT_TAG ] && docker tag theodolite $DOCKERHUB_ORG/theodolite:$CI_COMMIT_TAG"
+    - echo $DOCKERHUB_PW | docker login -u $DOCKERHUB_ID --password-stdin
+    - docker push $DOCKERHUB_ORG/theodolite
+    - docker logout
+  rules:
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - execution/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW"
+      when: manual
+      allow_failure: true
+  
+deploy-ghcr:
+  stage: deploy
+  tags:
+    - exec-dind
+  image: docker:19.03.1
+  services:
+    - docker:19.03.1-dind
+  variables:
+    DOCKER_TLS_CERTDIR: "/certs"
+  script:
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    - docker build --pull -t theodolite ./execution
+    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite ghcr.io/$GITHUB_CR_ORG/theodolite:${DOCKER_TAG_NAME}latest"
+    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite ghcr.io/$GITHUB_CR_ORG/theodolite:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
+    - "[ $CI_COMMIT_TAG ] && docker tag theodolite ghcr.io/$GITHUB_CR_ORG/theodolite:$CI_COMMIT_TAG"
+    - echo $GITHUB_CR_TOKEN | docker login ghcr.io -u $GITHUB_CR_USER --password-stdin
+    - docker push ghcr.io/$GITHUB_CR_ORG/theodolite
+    - docker logout
+  rules:
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - execution/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN"
+      when: manual
+      allow_failure: true
+      
\ No newline at end of file
diff --git a/execution/README.md b/execution/README.md
index 32c7cee386cfe1ce7f39ffd4539ea37675b83fbf..d8f30d0742d6e2037840332ec597637619510c79 100644
--- a/execution/README.md
+++ b/execution/README.md
@@ -2,51 +2,34 @@
 
 This directory contains the Theodolite framework for executing scalability
 benchmarks in a Kubernetes cluster. As Theodolite aims for executing benchmarks
-in realistic execution environments,, some third-party components are [required](#requirements).
+in realistic execution environments, some third-party components are [required](#installation).
 After everything is installed and configured, you can move on the [execution of
 benchmarks](#execution).
 
-## Requirements
+## Installation
 
-### Kubernetes Cluster
+For executing benchmarks, access to a Kubernetes cluster is required. If you already run other applications inside your
+cluster, you might want to consider creating a dedicated namespace for your benchmarks.
 
-For executing benchmarks, access to Kubernetes cluster is required. We suggest
-to create a dedicated namespace for executing our benchmarks. The following
-services need to be available as well.
+### Installing Dependencies
 
-### Kubernetes Volume
-
-For executing the benchmark as a Kubernetes job it is required to use a volume to store the results of the executions.
-In `infrastructure/kubernetes` are two files for creating a volume.
-Either one of them should be used.
-
-The `volumeSingle.yaml` is meant for systems where Kubernetes is run locally (e.g. minikube, kind etc.).
-However, you can also use the other file.
-In `volumeSingle.yaml` you need to set `path` to the path on your machine where the results should be stored.
-
-The `volumeCluster.yaml` should be used when Kubernetes runs in the cloud.
-In the `nodeAffinity` section you need to exchange `<node-name>` to the name of the node where the volume should be created (this node will most likely execute also the job).
-However, you can also set a different `nodeAffinity`.
-Further you need to set `path` to the path on the node where the results should be stored.
-
-After setting the properties you can create the volume with:
-
-```sh
-kubectl apply -f iinfrastructure/kubernetes/volume(Single|Cluster).yaml
-```
+The following third-party services need to be installed in your cluster. For most of them, the suggested way to install
+them is via [Helm](https://helm.sh).
 
 #### Prometheus
 
 We suggest to use the [Prometheus Operator](https://github.com/coreos/prometheus-operator)
 and create a dedicated Prometheus instance for these benchmarks.
 
-If Prometheus Operator is not already available on your cluster, a convenient
-way to install is via the [**unofficial** Prometheus Operator Helm chart](https://github.com/helm/charts/tree/master/stable/prometheus-operator).
-As you may not need an entire cluster monitoring stack, you can use our Helm
-configuration to only install the operator:
+If Prometheus Operator is not already available on your cluster, a convenient way to install it is via the
+[Prometheus community Helm chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack).
+As you may not need an entire cluster monitoring stack, you can use our Helm configuration to only install the
+operator:
 
 ```sh
-helm install prometheus-operator stable/prometheus-operator -f infrastructure/prometheus/helm-values.yaml
+helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+helm repo update
+helm install prometheus-operator prometheus-community/kube-prometheus-stack -f infrastructure/prometheus/helm-values.yaml
 ```
 
 After installation, you need to create a Prometheus instance:
@@ -55,9 +38,17 @@ After installation, you need to create a Prometheus instance:
 kubectl apply -f infrastructure/prometheus/prometheus.yaml
 ```
 
-You might also need to apply the [ServiceAccount](infrastructure/prometheus/service-account.yaml), [ClusterRole](infrastructure/prometheus/cluster-role.yaml)
-and the [CusterRoleBinding](infrastructure/prometheus/cluster-role-binding.yaml),
-depending on your cluster's security policies.
+You might also need to apply the [ClusterRole](infrastructure/prometheus/cluster-role.yaml), the
+[CusterRoleBinding](infrastructure/prometheus/cluster-role-binding.yaml) and the
+[ServiceAccount](infrastructure/prometheus/service-account.yaml), depending on your cluster's security
+policies. If you are not in the *default* namespace, alter the namespace in
+[Prometheus' ClusterRoleBinding](infrastructure/prometheus/cluster-role-binding.yaml) accordingly.
+
+```sh
+kubectl apply -f infrastructure/prometheus/cluster-role.yaml
+kubectl apply -f infrastructure/prometheus/cluster-role-binding.yaml
+kubectl apply -f infrastructure/prometheus/service-account.yaml
+```
 
 For the individual benchmarking components to be monitored, [ServiceMonitors](https://github.com/coreos/prometheus-operator#customresourcedefinitions)
 are used. See the corresponding sections below for how to install them.
@@ -68,44 +59,32 @@ As with Prometheus, we suggest to create a dedicated Grafana instance. Grafana
 with our default configuration can be installed with Helm:
 
 ```sh
-helm install grafana stable/grafana -f infrastructure/grafana/values.yaml
+helm repo add grafana https://grafana.github.io/helm-charts
+helm repo update
+helm install grafana grafana/grafana -f infrastructure/grafana/values.yaml
 ```
 
 The official [Grafana Helm Chart repository](https://github.com/helm/charts/tree/master/stable/grafana)
 provides further documentation including a table of configuration options.
 
-We provide ConfigMaps for a [Grafana dashboard](infrastructure/grafana/dashboard-config-map.yaml) and a [Grafana data source](infrastructure/grafana/prometheus-datasource-config-map.yaml).
-
-Create the Configmap for the dashboard:
+We provide ConfigMaps for a [Grafana dashboard](infrastructure/grafana/dashboard-config-map.yaml) and a
+[Grafana data source](infrastructure/grafana/prometheus-datasource-config-map.yaml). Create them as follows:
 
 ```sh
 kubectl apply -f infrastructure/grafana/dashboard-config-map.yaml
-```
-
-Create the Configmap for the data source:
-
-```sh
 kubectl apply -f infrastructure/grafana/prometheus-datasource-config-map.yaml
 ```
 
 #### A Kafka cluster
 
-One possible way to set up a Kafka cluster is via [Confluent's Helm Charts](https://github.com/confluentinc/cp-helm-charts).
-For using these Helm charts and conjuction with the Prometheus Operator (see
-below), we provide a [patch](https://github.com/SoerenHenning/cp-helm-charts)
-for these helm charts. Note that this patch is only required for observation and
-not for the actual benchmark execution and evaluation.
-
-##### Our patched Confluent Helm Charts
-
-To use our patched Confluent Helm Charts clone the
-[chart's repsoitory](https://github.com/SoerenHenning/cp-helm-charts). We also
-provide a [default configuration](infrastructure/kafka/values.yaml). If you do
+We suggest to set up a Kafka cluster via [Confluent's Helm Charts](https://github.com/confluentinc/cp-helm-charts).
+Currently, these charts do not expose all metrics we would like to display. Thus, we provide a patched version of this chart.
+We also provide a [default configuration](infrastructure/kafka/values.yaml). If you do
 not want to deploy 10 Kafka and 3 Zookeeper instances, alter the configuration
-file accordingly. To install Confluent's Kafka and use the configuration:
+file accordingly. To install the patched Confluent's Kafka with our configuration:
 
 ```sh
-helm install my-confluent <path-to-cp-helm-charts> -f infrastructure/kafka/values.yaml
+helm install my-confluent https://github.com/SoerenHenning/cp-helm-charts/releases/download/v6.0.1-1-JMX-FIX/cp-helm-charts-0.6.0.tgz -f infrastructure/kafka/values.yaml
 ```
 
 To let Prometheus scrape Kafka metrics, deploy a ServiceMonitor:
@@ -114,10 +93,11 @@ To let Prometheus scrape Kafka metrics, deploy a ServiceMonitor:
 kubectl apply -f infrastructure/kafka/service-monitor.yaml
 ```
 
-##### Other options for Kafka
-
 Other Kafka deployments, for example, using Strimzi, should work in a similar way.
 
+Please note that currently, even if installed differently, the corresponding services must run at
+*my-confluent-cp-kafka:9092*, *my-confluent-cp-zookeeper:2181* and *my-confluent-cp-schema-registry:8081*.
+
 #### A Kafka Client Pod
 
 A permanently running pod used for Kafka configuration is started via:
@@ -144,53 +124,110 @@ To install it:
 helm install kafka-lag-exporter https://github.com/lightbend/kafka-lag-exporter/releases/download/v0.6.3/kafka-lag-exporter-0.6.3.tgz -f infrastructure/kafka-lag-exporter/values.yaml
 ```
 
+### Installing Theodolite
 
-### Python 3.7
+While Theodolite itself does not have to be installed, as it is loaded at runtime (see [execution](#Execution)), it requires some
+resources to be deployed in your cluster. These resources are grouped under RBAC and Volume in the following paragraphs.
 
-For executing benchmarks, a **Python 3.7** installation is required. We suggest
-to use a virtual environment placed in the `.venv` directory (in the Theodolite
-root directory). As set of requirements is needed. You can install them with the following
-command (make sure to be in your virtual environment if you use one):
+#### Theodolite RBAC
+
+**The following step is only required if RBAC is enabled in your cluster.** If you are not sure whether this is the
+case, you may want to simply try it without the following step.
+
+If RBAC is enabled in your cluster, you have to allow Theodolite to start and stop pods etc. To do so, deploy the RBAC
+resources via:
 
 ```sh
-pip install -r requirements.txt
+kubectl apply -f infrastructure/kubernetes/rbac/role.yaml
+kubectl apply -f infrastructure/kubernetes/rbac/role-binding.yaml
+kubectl apply -f infrastructure/kubernetes/rbac/service-account.yaml
 ```
 
+#### Theodolite Volume
+
+In order to persistently store benchmark results, Theodolite needs a volume mounted. We provide pre-configured
+declarations for different volume types.
+
+##### *hostPath* volume
 
-### Required Manual Adjustments
+Using a [hostPath volume](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath) is the easiest option when
+running Theodolite locally, e.g., with minikube or kind.
 
-Depending on your setup, some additional adjustments may be necessary:
+Just modify `infrastructure/kubernetes/volumeSingle.yaml` by setting `path` to the directory on your host machine where
+all benchmark results should be stored and run:
+
+```sh
+kubectl apply -f infrastructure/kubernetes/volumeSingle.yaml
+```
 
-* Change Kafka and Zookeeper servers in the Kubernetes deployments (uc1-application etc.) and `run_XX.sh` scripts
-* Change Prometheus' URL in `lag_analysis.py`
-* Change the path to your Python 3.7 virtual environment in the `run_XX.sh` schripts (to find the venv's `bin/activate`)
-* Change the name of your Kubernetes namespace for [Prometheus' ClusterRoleBinding](infrastructure/prometheus/cluster-role-binding.yaml)
-* *Please let us know if there are further adjustments necessary*
+##### *local* volume
 
+A [local volume](https://kubernetes.io/docs/concepts/storage/volumes/#local) is a simple option to use when having
+access (e.g. via SSH) to one of your cluster nodes.
+
+You first need to create a directory on a selected node where all benchmark results should be stored. Next, modify
+`infrastructure/kubernetes/volumeCluster.yaml` by setting `<node-name>` to your selected node (this node will most
+likely also execute the job). Further, you have to set `path` to the directory on the node you just created. To deploy
+your volume, run:
+
+```sh
+kubectl apply -f infrastructure/kubernetes/volumeCluster.yaml
+```
+
+##### Other volumes
+
+To use volumes provided by public cloud providers or network-based file systems, you can use the definitions in
+`infrastructure/kubernetes/` as a starting point. See the official
+[volumes documentation](https://kubernetes.io/docs/concepts/storage/volumes/) for additional information.
 
 
 ## Execution
 
-Please note that a **Python 3.7** installation is required for executing Theodolite.
+The preferred way to run scalability benchmarks with Theodolite is to deploy Theodolite
+[Kubernetes Jobs](https://kubernetes.io/docs/concepts/workloads/controllers/job/) in your cluster. For running
+Theodolite locally on your machine see the description below.
+
+`theodolite.yaml` provides a template for your own Theodolite job. To run your own job, create a copy, give it a name
+(`metadata.name`) and adjust configuration parameters as desired. For a description of available configuration options
+see the [Configuration](#configuration) section below. Note that you might need to uncomment the `serviceAccountName` line if
+RBAC is enabled on your cluster (see installation of [Theodolite RBAC](#Theodolite-RBAC)).
 
-The `./theodolite.py` is the entrypoint for all benchmark executions. Is has to be called as follows:
+To start the execution of a benchmark run (with `<your-theodolite-yaml>` being your job definition):
 
 ```sh
-./theodolite.sh <use-case> <wl-values> <instances> <partitions> <cpu-limit> <memory-limit> <commit-interval> <duration> <domain-restriction> <search-strategy>
+kubectl apply -f <your-theodolite-yaml>
 ```
 
-* `<use-case>`: Stream processing use case to be benchmarked. Has to be one of `1`, `2`, `3` or `4`.
-* `<wl-values>`: Values for the workload generator to be tested, separated by commas and sorted in ascending order. For example `100000,200000,300000`.
-* `<instances>`: Numbers of instances to be benchmarked, separated by commas and sorted in ascending order. For example `1,2,3,4`.
-* `<partitions>`: Number of partitions for Kafka topics. Optional. Default `40`.
-* `<cpu-limit>`: Kubernetes CPU limit. Optional. Default `1000m`.
-* `<memory-limit>`: Kubernetes memory limit. Optional. Default `4Gi`.
-* `<commit-interval>`: Kafka Streams' commit interval in milliseconds. Optional. Default `100`.
-* `<duration>`: Duration in minutes subexperiments should be executed for. Optional. Default `5`.
-* `<domain-restriction>`: The domain restriction: `restrict-domain` to use domain restriction, `no-domain-restriction` to not use domain restriction. Default `no-domain-restriction`. For more details see Section _Domain Restriction_.
-* `<search-strategy>`: The benchmarking search strategy. Can be set to `check-all`, `linear-search` or `binary-search`. Default `check-all`. For more details see Section _Benchmarking Search Strategies_.
+This will create a pod with a name such as `your-job-name-xxxxxx`. You can verify this via `kubectl get pods`. With
+`kubectl logs -f <your-job-name-xxxxxx>`, you can follow the benchmark execution logs.
+
+Once your job is completed (you can verify via `kubectl get jobs`), its results are stored inside your configured
+Kubernetes volume.
+
+**Make sure to always run only one Theodolite job at a time.**
+
+### Configuration
+
+| Command line         | Kubernetes          | Description                                                  |
+| -------------------- | ------------------- | ------------------------------------------------------------ |
+| --uc                 | UC                  | **[Mandatory]** Stream processing use case to be benchmarked. Has to be one of `1`, `2`, `3` or `4`. |
+| --loads              | LOADS               | **[Mandatory]** Values for the workload generator to be tested, should be sorted in ascending order. |
+| --instances          | INSTANCES           | **[Mandatory]** Numbers of instances to be benchmarked, should be sorted in ascending order. |
+| --duration           | DURATION            | Duration in minutes subexperiments should be executed for. *Default:* `5`. |
+| --partitions         | PARTITIONS          | Number of partitions for Kafka topics. *Default:* `40`.      |
+| --cpu-limit          | CPU_LIMIT           | Kubernetes CPU limit for a single Pod.  *Default:* `1000m`.  |
+| --memory-limit       | MEMORY_LIMIT        | Kubernetes memory limit for a single Pod. *Default:* `4Gi`.  |
+| --domain-restriction | DOMAIN_RESTRICTION  | A flag that indicates that domain restriction should be used. *Default:* not set. For more details see Section [Domain Restriction](#domain-restriction). |
+| --search-strategy    | SEARCH_STRATEGY     | The benchmarking search strategy. Can be set to `check-all`, `linear-search` or `binary-search`. *Default:* `check-all`. For more details see Section [Benchmarking Search Strategies](#benchmarking-search-strategies). |
+| --reset              | RESET               | Resets the environment before each subexperiment. Useful if execution was aborted and just one experiment should be executed. |
+| --reset-only         | RESET_ONLY          | Only resets the environment. Ignores all other parameters. Useful if execution was aborted and one wants a clean state for new executions. |
+| --prometheus         | PROMETHEUS_BASE_URL | Defines where to find the prometheus instance. *Default:* `http://localhost:9090` |
+| --path               | RESULT_PATH         | A directory path for the results. Relative to the Execution folder. *Default:* `results` |
+| --configurations     | CONFIGURATIONS      | Defines environment variables for the use cases and, thus, enables further configuration options. |
+| --threshold          | THRESHOLD           | The threshold for the trend slope that the search strategies use to determine that a load could be handled. *Default:* `2000` |
 
 ### Domain Restriction
+
 For dimension value, we have a domain of the amounts of instances. As a consequence, for each dimension value the maximum number of lag experiments is equal to the size of the domain. How the domain is determined is defined by the following domain restriction strategies.
 
 * `no-domain-restriction`: For each dimension value, the domain of instances is equal to the set of all amounts of instances.
@@ -199,8 +236,45 @@ For dimension value, we have a domain of the amounts of instances. As a conseque
     * If the dimension value is not the smallest dimension value and N is the amount of minimal amount of instances that was suitable for the last smaller dimension value the domain for this dimension value contains all amounts of instances greater than, or equal to N.
 
 ### Benchmarking Search Strategies
+
 There are the following benchmarking strategies:
 
 * `check-all`: For each dimension value, execute one lag experiment for all amounts of instances within the current domain.
 * `linear-search`: A heuristic which works as follows: For each dimension value, execute one lag experiment for all number of instances within the current domain. The execution order is from the lowest number of instances to the highest amount of instances and the execution for each dimension value is stopped, when a suitable amount of instances is found or if all lag experiments for the dimension value were not successful.
 * `binary-search`: A heuristic which works as follows: For each dimension value, execute one lag experiment for all number of instances within the current domain. The execution order is in a binary-search-like manner. The execution is stopped, when a suitable amount of instances is found or if all lag experiments for the dimension value were not successful.
+
+## Observation
+
+The installed Grafana instance provides a dashboard to observe the benchmark execution. Unless configured otherwise,
+this dashboard can be accessed via `http://<cluster-ip>:31199` or via `http://localhost:31199` if proxied with
+`kubectl port-forward svc/grafana 8080:service`. Default credentials are user *admin* with password *admin*.
+
+
+## Local Execution (e.g. for Development)
+
+As an alternative to executing Theodolite as a Kubernetes Job, it is also possible to run it from your local system,
+for example, for development purposes. In addition to the general installation instructions, the following adjustments
+are necessary.
+
+### Installation
+
+For local execution a **Python 3.7** installation is required. We suggest to use a virtual environment placed in the `.venv`
+directory (in the Theodolite root directory). A set of requirements is needed. You can install them with the following
+command (make sure to be in your virtual environment if you use one):
+
+```sh
+pip install -r requirements.txt
+```
+
+Kubernetes volumes and service accounts, roles, and role bindings for Theodolite are not required in this case.
+
+### Local Execution
+
+The `theodolite.py` is the entrypoint for all benchmark executions. It has to be called as follows:
+
+```python
+python theodolite.py --uc <uc> --loads <load> [<load> ...] --instances <instances> [<instances> ...]
+```
+
+This command is the minimal command for execution. Further configurations options are described [above](#configuration)
+or available via `python theodolite.py -h`.
\ No newline at end of file
diff --git a/execution/infrastructure/grafana/values.yaml b/execution/infrastructure/grafana/values.yaml
index 211a72a61a2699c7108ec4adb9a7edebbccecb69..562516ad76f9a0f88c0db8557da51178dbbc9871 100644
--- a/execution/infrastructure/grafana/values.yaml
+++ b/execution/infrastructure/grafana/values.yaml
@@ -11,7 +11,9 @@ adminPassword: admin
 ## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders
 ## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
 sidecar:
-  image: kiwigrid/k8s-sidecar:0.1.99
+  image:
+    repository: "kiwigrid/k8s-sidecar"
+    tag: "1.1.0"
   imagePullPolicy: IfNotPresent
   dashboards:
     enabled: true
diff --git a/execution/infrastructure/kafka/values.yaml b/execution/infrastructure/kafka/values.yaml
index 51dcb09d5f24579b148811c8a1c27fe165c3fb56..1efbda0515d0a9c881552cb63293ca8cc28c98b2 100644
--- a/execution/infrastructure/kafka/values.yaml
+++ b/execution/infrastructure/kafka/values.yaml
@@ -53,7 +53,7 @@ cp-kafka:
     "replica.fetch.max.bytes": "134217728" # 128 MB
     # "default.replication.factor": 3
     # "min.insync.replicas": 2
-    # "auto.create.topics.enable": false
+    "auto.create.topics.enable": false
     "log.retention.ms": "10000" # 10s
     "metrics.sample.window.ms": "5000" #5s
 
diff --git a/execution/infrastructure/kubernetes/rbac/role-binding.yaml b/execution/infrastructure/kubernetes/rbac/role-binding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ef2d0c015a1b42880f9652bc241950548a952792
--- /dev/null
+++ b/execution/infrastructure/kubernetes/rbac/role-binding.yaml
@@ -0,0 +1,11 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: theodolite
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: theodolite
+subjects:
+- kind: ServiceAccount
+  name: theodolite
\ No newline at end of file
diff --git a/execution/infrastructure/kubernetes/rbac/role.yaml b/execution/infrastructure/kubernetes/rbac/role.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..84ba14a8bc7a6eceb8a20596ede057ca2271b967
--- /dev/null
+++ b/execution/infrastructure/kubernetes/rbac/role.yaml
@@ -0,0 +1,41 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: theodolite
+rules:
+  - apiGroups:
+    - apps
+    resources:
+    - deployments
+    verbs:
+    - delete
+    - list
+    - get
+    - create
+  - apiGroups:
+    - ""
+    resources:
+    - services
+    - pods
+    - servicemonitors
+    - configmaps
+    verbs:
+    - delete
+    - list
+    - get
+    - create
+  - apiGroups:
+    - ""
+    resources:
+    - pods/exec
+    verbs:
+    - create
+    - get
+  - apiGroups:
+    - monitoring.coreos.com
+    resources:
+    - servicemonitors
+    verbs:
+    - delete
+    - list
+    - create
\ No newline at end of file
diff --git a/execution/infrastructure/kubernetes/rbac/service-account.yaml b/execution/infrastructure/kubernetes/rbac/service-account.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c7f33076e31ac53d02491c80fd61cdc5b241dfd7
--- /dev/null
+++ b/execution/infrastructure/kubernetes/rbac/service-account.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: theodolite
\ No newline at end of file
diff --git a/execution/lib/cli_parser.py b/execution/lib/cli_parser.py
index f785bce4f933622a99b4daaadeb483276d4956cd..de609bc55e21e9467a2b28168be6e478171cfddd 100644
--- a/execution/lib/cli_parser.py
+++ b/execution/lib/cli_parser.py
@@ -1,6 +1,7 @@
 import argparse
 import os
 
+
 def env_list_default(env, tf):
     """
     Makes a list from an environment string.
@@ -10,6 +11,40 @@ def env_list_default(env, tf):
         v = [tf(s) for s in v.split(',')]
     return v
 
+
+def key_values_to_dict(kvs):
+    """
+    Given a list with key values in form `Key=Value` it creates a dict from it.
+    """
+    my_dict = {}
+    for kv in kvs:
+        k, v = kv.split("=")
+        my_dict[k] = v
+    return my_dict
+
+
+def env_dict_default(env):
+    """
+    Makes a dict from an environment string.
+    """
+    v = os.environ.get(env)
+    if v is not None:
+        return key_values_to_dict(v.split(','))
+    else:
+        return dict()
+
+
+class StoreDictKeyPair(argparse.Action):
+    def __init__(self, option_strings, dest, nargs=None, **kwargs):
+        self._nargs = nargs
+        super(StoreDictKeyPair, self).__init__(
+            option_strings, dest, nargs=nargs, **kwargs)
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        my_dict = key_values_to_dict(values)
+        setattr(namespace, self.dest, my_dict)
+
+
 def default_parser(description):
     """
     Returns the default parser that can be used for thodolite and run uc py
@@ -33,11 +68,6 @@ def default_parser(description):
                         metavar='<memory limit>',
                         default=os.environ.get('MEMORY_LIMIT', '4Gi'),
                         help='Kubernetes memory limit')
-    parser.add_argument('--commit-ms',
-                        metavar='<commit ms>',
-                        type=int,
-                        default=os.environ.get('COMMIT_MS', 100),
-                        help='Kafka Streams commit interval in milliseconds')
     parser.add_argument('--duration', '-d',
                         metavar='<duration>',
                         type=int,
@@ -50,20 +80,33 @@ def default_parser(description):
                         help='Defines the Kubernetes where the applications should run')
     parser.add_argument('--reset',
                         action="store_true",
+                        default=os.environ.get(
+                            'RESET', 'false').lower() == 'true',
                         help='Resets the environment before execution')
     parser.add_argument('--reset-only',
                         action="store_true",
+                        default=os.environ.get(
+                            'RESET_ONLY', 'false').lower() == 'true',
                         help='Only resets the environment. Ignores all other parameters')
     parser.add_argument('--prometheus',
                         metavar='<URL>',
-                        default=os.environ.get('PROMETHEUS_BASE_URL', 'http://localhost:9090'),
+                        default=os.environ.get(
+                            'PROMETHEUS_BASE_URL', 'http://localhost:9090'),
                         help='Defines where to find the prometheus instance')
     parser.add_argument('--path',
                         metavar='<path>',
                         default=os.environ.get('RESULT_PATH', 'results'),
                         help='A directory path for the results')
+    parser.add_argument("--configurations",
+                        metavar="KEY=VAL",
+                        dest="configurations",
+                        action=StoreDictKeyPair,
+                        nargs="+",
+                        default=env_dict_default('CONFIGURATIONS'),
+                        help='Defines the environment variables for the UC')
     return parser
 
+
 def benchmark_parser(description):
     """
     Parser for the overall benchmark execution
@@ -86,13 +129,21 @@ def benchmark_parser(description):
                         help='[mandatory] List of instances used in benchmarks')
     parser.add_argument('--domain-restriction',
                         action="store_true",
+                        default=os.environ.get(
+                            'DOMAIN_RESTRICTION', 'false').lower() == 'true',
                         help='To use domain restriction. For details see README')
     parser.add_argument('--search-strategy',
                         metavar='<strategy>',
                         default=os.environ.get('SEARCH_STRATEGY', 'default'),
                         help='The benchmarking search strategy. Can be set to default, linear-search or binary-search')
+    parser.add_argument('--threshold',
+                        type=int,
+                        metavar='<threshold>',
+                        default=os.environ.get('THRESHOLD', 2000),
+                        help='The threshold for the trend slop that the search strategies use to determine that a load could be handled')
     return parser
 
+
 def execution_parser(description):
     """
     Parser for executing one use case
diff --git a/execution/lib/trend_slope_computer.py b/execution/lib/trend_slope_computer.py
index 294226c35c0038a01804f7f5e8eb3a1e53c79b79..90ae26cfd275f53307e19532f047e5e0a9326d3a 100644
--- a/execution/lib/trend_slope_computer.py
+++ b/execution/lib/trend_slope_computer.py
@@ -2,7 +2,7 @@ from sklearn.linear_model import LinearRegression
 import pandas as pd
 import os
 
-def compute(directory, filename, warmup_sec, threshold):
+def compute(directory, filename, warmup_sec):
     df = pd.read_csv(os.path.join(directory, filename))
     input = df
     input['sec_start'] = input.loc[0:, 'timestamp'] - input.iloc[0]['timestamp']
@@ -16,4 +16,4 @@ def compute(directory, filename, warmup_sec, threshold):
 
     trend_slope = linear_regressor.coef_[0][0]
 
-    return trend_slope
\ No newline at end of file
+    return trend_slope
diff --git a/execution/run_uc.py b/execution/run_uc.py
index 6ebf797241b45a342214ea4dbd003e371f5bd828..a0fcdbb6d57e5dc67d18e69b7d07fcdbfa809307 100644
--- a/execution/run_uc.py
+++ b/execution/run_uc.py
@@ -24,7 +24,7 @@ def load_variables():
     parser = execution_parser(description='Run use case Programm')
     args = parser.parse_args()
     print(args)
-    if args.exp_id is None or args.uc is None or args.load is None or args.instances is None:
+    if (args.exp_id is None or args.uc is None or args.load is None or args.instances is None) and not args.reset_only:
         print('The options --exp-id, --uc, --load and --instances are mandatory.')
         print('Some might not be set!')
         sys.exit(1)
@@ -41,8 +41,8 @@ def initialize_kubernetes_api():
         config.load_kube_config()  # try using local config
     except config.config_exception.ConfigException as e:
         # load config from pod, if local config is not available
-        logging.debug('Failed loading local Kubernetes configuration,'
-                      + ' try from cluster')
+        logging.debug(
+            'Failed loading local Kubernetes configuration try from cluster')
         logging.debug(e)
         config.load_incluster_config()
 
@@ -58,8 +58,7 @@ def create_topics(topics):
     # Calling exec and waiting for response
     print('Create topics')
     for (topic, partitions) in topics:
-        print('Create topic ' + topic + ' with #' + str(partitions)
-              + ' partitions')
+        print(f'Create topic {topic} with #{partitions} partitions')
         exec_command = [
             '/bin/sh',
             '-c',
@@ -86,7 +85,7 @@ def load_yaml(file_path):
         with f:
             return yaml.safe_load(f)
     except Exception as e:
-        logging.error('Error opening file %s' % file_path)
+        logging.error('Error opening file %s', file_path)
         logging.error(e)
 
 
@@ -105,6 +104,15 @@ def load_yaml_files():
     return wg, app_svc, app_svc_monitor, app_jmx, app_deploy
 
 
+def replace_env_value(container, key, value):
+    """
+    Special method to replace in a container with kubernetes env values
+    the value of a given parameter.
+    """
+    next(filter(lambda x: x['name'] == key, container))[
+        'value'] = value
+
+
 def start_workload_generator(wg_yaml, dim_value, uc_id):
     """Starts the workload generator.
     :param wg_yaml: The yaml object for the workload generator.
@@ -118,47 +126,48 @@ def start_workload_generator(wg_yaml, dim_value, uc_id):
 
     num_sensors = dim_value
     wl_max_records = 150000
-    wl_instances = int(((num_sensors + (wl_max_records - 1)) / wl_max_records))
+    wl_instances = (num_sensors + wl_max_records - 1) // wl_max_records
 
     # set parameters special for uc 2
     if uc_id == '2':
         print('use uc2 stuff')
         num_nested_groups = dim_value
-        num_sensors = '4'
-        approx_num_sensors = int(num_sensors) ** num_nested_groups
-        wl_instances = int(
-            ((approx_num_sensors + wl_max_records - 1) / wl_max_records)
-        )
+        num_sensors = 4
+        approx_num_sensors = num_sensors ** num_nested_groups
+        wl_instances = (approx_num_sensors +
+                        wl_max_records - 1) // wl_max_records
 
     # Customize workload generator creations
     wg_yaml['spec']['replicas'] = wl_instances
     # Set used use case
     wg_containter = next(filter(
         lambda x: x['name'] == 'workload-generator', wg_yaml['spec']['template']['spec']['containers']))
-    wg_containter['image'] = 'theodolite/theodolite-uc' + uc_id + \
+    wg_containter['image'] = 'ghcr.io/cau-se/theodolite-uc' + uc_id + \
         '-workload-generator:latest'
     # Set environment variables
 
-    next(filter(lambda x: x['name'] == 'NUM_SENSORS', wg_containter['env']))[
-        'value'] = str(num_sensors)
-    next(filter(lambda x: x['name'] == 'INSTANCES', wg_containter['env']))[
-        'value'] = str(wl_instances)
+    replace_env_value(wg_containter['env'], 'NUM_SENSORS', str(num_sensors))
+    replace_env_value(wg_containter['env'], 'INSTANCES', str(wl_instances))
+
     if uc_id == '2':  # Special configuration for uc2
-        next(filter(lambda x: x['name'] == 'NUM_NESTED_GROUPS', wg_containter['env']))[
-            'value'] = str(num_nested_groups)
+        replace_env_value(
+            wg_containter['env'], 'NUM_NESTED_GROUPS', str(num_nested_groups))
+
     try:
         wg_ss = appsApi.create_namespaced_deployment(
             namespace=namespace,
             body=wg_yaml
         )
-        print("Deployment '%s' created." % wg_ss.metadata.name)
+        print(f'Deployment {wg_ss.metadata.name} created.')
         return wg_ss
     except client.rest.ApiException as e:
-        print("Deployment creation error: %s" % e.reason)
+        print(f'Deployment creation error: {e.reason}')
         return wg_yaml
 
 
-def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml, instances, uc_id, commit_interval_ms, memory_limit, cpu_limit):
+def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml,
+                      instances, uc_id, memory_limit, cpu_limit,
+                      configurations):
     """Applies the service, service monitor, jmx config map and start the
     use case application.
 
@@ -168,9 +177,9 @@ def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml, instanc
     :param deploy_yaml: The yaml object for the application.
     :param int instances: Number of instances for use case application.
     :param string uc_id: The id of the use case to execute.
-    :param int commit_interval_ms: The commit interval in ms.
     :param string memory_limit: The memory limit for the application.
     :param string cpu_limit: The CPU limit for the application.
+    :param dict configurations: A dictionary with ENV variables for configurations.
     :return:
         The Service, ServiceMonitor, JMX ConfigMap and Deployment.
         In case the resource already exist/error the yaml object is returned.
@@ -183,10 +192,10 @@ def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml, instanc
     try:
         svc = coreApi.create_namespaced_service(
             namespace=namespace, body=svc_yaml)
-        print("Service '%s' created." % svc.metadata.name)
+        print(f'Service {svc.metadata.name} created.')
     except client.rest.ApiException as e:
         svc = svc_yaml
-        logging.error("Service creation error: %s" % e.reason)
+        logging.error("Service creation error: %s", e.reason)
 
     # Create custom object service monitor
     try:
@@ -197,39 +206,54 @@ def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml, instanc
             plural="servicemonitors",  # CustomResourceDef of ServiceMonitor
             body=svc_monitor_yaml,
         )
-        print("ServiceMonitor '%s' created." % svc_monitor['metadata']['name'])
+        print(f"ServiceMonitor '{svc_monitor['metadata']['name']}' created.")
     except client.rest.ApiException as e:
         svc_monitor = svc_monitor_yaml
-        logging.error("ServiceMonitor creation error: %s" % e.reason)
+        logging.error("ServiceMonitor creation error: %s", e.reason)
 
     # Apply jmx config map for aggregation service
     try:
         jmx_cm = coreApi.create_namespaced_config_map(
             namespace=namespace, body=jmx_yaml)
-        print("ConfigMap '%s' created." % jmx_cm.metadata.name)
+        print(f"ConfigMap '{jmx_cm.metadata.name}' created.")
     except client.rest.ApiException as e:
         jmx_cm = jmx_yaml
-        logging.error("ConfigMap creation error: %s" % e.reason)
+        logging.error("ConfigMap creation error: %s", e.reason)
 
     # Create deployment
     deploy_yaml['spec']['replicas'] = instances
     app_container = next(filter(
-        lambda x: x['name'] == 'uc-application', deploy_yaml['spec']['template']['spec']['containers']))
-    app_container['image'] = 'theodolite/theodolite-uc' + uc_id \
+        lambda x: x['name'] == 'uc-application',
+        deploy_yaml['spec']['template']['spec']['containers']))
+    app_container['image'] = 'ghcr.io/cau-se/theodolite-uc' + uc_id \
         + '-kstreams-app:latest'
-    next(filter(lambda x: x['name'] == 'COMMIT_INTERVAL_MS', app_container['env']))[
-        'value'] = str(commit_interval_ms)
+
+    # Set configurations environment parameters for SPE
+    for k, v in configurations.items():
+        # check if environment variable is already definde in yaml
+        env = next(filter(lambda x: x['name'] == k,
+                          app_container['env']), None)
+        if env is not None:
+            env['value'] = v  # replace value
+        else:
+            # create new environment pair
+            conf = {'name': k, 'value': v}
+            app_container['env'].append(conf)
+
+    # Set resources in Kubernetes
     app_container['resources']['limits']['memory'] = memory_limit
     app_container['resources']['limits']['cpu'] = cpu_limit
+
+    # Deploy application
     try:
         app_deploy = appsApi.create_namespaced_deployment(
             namespace=namespace,
             body=deploy_yaml
         )
-        print("Deployment '%s' created." % app_deploy.metadata.name)
+        print(f"Deployment '{app_deploy.metadata.name}' created.")
     except client.rest.ApiException as e:
         app_deploy = deploy_yaml
-        logging.error("Deployment creation error: %s" % e.reason)
+        logging.error("Deployment creation error: %s", e.reason)
 
     return svc, svc_monitor, jmx_cm, app_deploy
 
@@ -243,7 +267,7 @@ def wait_execution(execution_minutes):
 
     for i in range(execution_minutes):
         time.sleep(60)
-        print(f"Executed: {i+1} minutes")
+        print(f'Executed: {i+1} minutes')
     print('Execution finished')
     return
 
@@ -258,7 +282,16 @@ def run_evaluation(exp_id, uc_id, dim_value, instances, execution_minutes, prome
     :param int execution_minutes: How long the use case where executed.
     """
     print('Run evaluation function')
-    lag_analysis.main(exp_id, f'uc{uc_id}', dim_value, instances, execution_minutes, prometheus_base_url, result_path)
+    try:
+        lag_analysis.main(exp_id, f'uc{uc_id}', dim_value, instances,
+                          execution_minutes, prometheus_base_url,
+                          result_path)
+    except Exception as e:
+        err_msg = 'Evaluation function failed'
+        print(err_msg)
+        logging.exception(err_msg)
+        print('Benchmark execution continues')
+
     return
 
 
@@ -310,13 +343,31 @@ def stop_applications(wg, app_svc, app_svc_monitor, app_jmx, app_deploy):
             name=app_svc_monitor['metadata']['name'])
         print('Resource deleted')
     except Exception as e:
-        print("Error deleting service monitor")
+        print('Error deleting service monitor')
 
     print('Delete jmx config map')
     delete_resource(app_jmx, coreApi.delete_namespaced_config_map)
 
     print('Delete uc application')
     delete_resource(app_deploy, appsApi.delete_namespaced_deployment)
+
+    print('Check all pods deleted.')
+    while True:
+        # Wait a bit for deletion
+        time.sleep(2)
+
+        # Count how many pods still need to be deleted
+        no_load = len(coreApi.list_namespaced_pod(
+            namespace, label_selector='app=titan-ccp-load-generator').items)
+        no_uc = len(coreApi.list_namespaced_pod(
+            namespace, label_selector='app=titan-ccp-aggregation').items)
+
+        # Check if all pods deleted
+        if no_load <= 0 and no_uc <= 0:
+            print('All pods deleted.')
+            break
+
+        print(f'#{no_load} load generator and #{no_uc} uc pods needs to be deleted')
     return
 
 
@@ -363,7 +414,7 @@ def delete_topics(topics):
                       stderr=True, stdin=False,
                       stdout=True, tty=False)
         if resp == '0':
-            print("Topics deleted")
+            print('Topics deleted')
             break
     return
 
@@ -455,7 +506,7 @@ def reset_cluster(wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics):
     stop_lag_exporter()
 
 
-def main(exp_id, uc_id, dim_value, instances, partitions, cpu_limit, memory_limit, commit_interval_ms, execution_minutes, prometheus_base_url, reset, ns, result_path, reset_only=False):
+def main(exp_id, uc_id, dim_value, instances, partitions, cpu_limit, memory_limit, execution_minutes, prometheus_base_url, reset, ns, result_path, configurations, reset_only=False):
     """
     Main method to execute one time the benchmark for a given use case.
     Start workload generator/application -> execute -> analyse -> stop all
@@ -466,9 +517,9 @@ def main(exp_id, uc_id, dim_value, instances, partitions, cpu_limit, memory_limi
     :param int partitions: Number of partitions the kafka topics should have.
     :param string cpu_limit: Max CPU utilazation for application.
     :param string memory_limit: Max memory utilazation for application.
-    :param int commit_interval_ms: Kafka Streams commit interval in milliseconds
     :param int execution_minutes: How long to execute the benchmark.
     :param boolean reset: Flag for reset of cluster before execution.
+    :param dict configurations: Key value pairs for setting env variables of UC.
     :param boolean reset_only: Flag to only reset the application.
     """
     global namespace
@@ -514,9 +565,9 @@ def main(exp_id, uc_id, dim_value, instances, partitions, cpu_limit, memory_limi
         app_deploy,
         instances,
         uc_id,
-        commit_interval_ms,
         memory_limit,
-        cpu_limit)
+        cpu_limit,
+        configurations)
     print('---------------------')
 
     wait_execution(execution_minutes)
@@ -535,7 +586,7 @@ if __name__ == '__main__':
     logging.basicConfig(level=logging.INFO)
     args = load_variables()
     print('---------------------')
-    main(args.exp_id, args.uc, args.load, args.instances,
-         args.partitions, args.cpu_limit, args.memory_limit,
-         args.commit_ms, args.duration, args.prometheus, args.reset,
-         args.namespace, args.path, args.reset_only)
+    main(args.exp_id, args.uc, args.load, args.instances, args.partitions,
+         args.cpu_limit, args.memory_limit, args.duration, args.prometheus,
+         args.reset, args.namespace, args.path, args.configurations,
+         args.reset_only)
diff --git a/execution/strategies/config.py b/execution/strategies/config.py
index 3741bcd5a8f025b0efc8bfb6ab53fdf08381ce9f..d4df97c18ae54c7c181ddf08264c013f9447350f 100644
--- a/execution/strategies/config.py
+++ b/execution/strategies/config.py
@@ -10,13 +10,14 @@ class ExperimentConfig:
     partitions: int
     cpu_limit: str
     memory_limit: str
-    kafka_streams_commit_interval_ms: int
     execution_minutes: int
     prometheus_base_url: str
     reset: bool
     namespace: str
     result_path: str
+    configurations: dict
     domain_restriction_strategy: object
     search_strategy: object
+    threshold: int
     subexperiment_executor: object
     subexperiment_evaluator: object
diff --git a/execution/strategies/strategies/config.py b/execution/strategies/strategies/config.py
index 3c6a15918ec8cf923b79e6f4f98564f983deac63..5c31f8c97a4085931cdfa1fa017d4e5909e21915 100644
--- a/execution/strategies/strategies/config.py
+++ b/execution/strategies/strategies/config.py
@@ -11,9 +11,9 @@ class SubexperimentConfig:
     partitions: int
     cpu_limit: str
     memory_limit: str
-    kafka_streams_commit_interval_ms: int
     execution_minutes: int
     prometheus_base_url: str
     reset: bool
     namespace: str
     result_path: str
+    configurations: dict
diff --git a/execution/strategies/strategies/search/binary_search_strategy.py b/execution/strategies/strategies/search/binary_search_strategy.py
index be7da54025c2f9fda1750d8197d3afd4055da790..46748cbda250597b3a7644522126268be4599293 100644
--- a/execution/strategies/strategies/search/binary_search_strategy.py
+++ b/execution/strategies/strategies/search/binary_search_strategy.py
@@ -5,26 +5,29 @@ from strategies.strategies.config import SubexperimentConfig
 def binary_search(config, dim_value, lower, upper, subexperiment_counter):
     if lower == upper:
         print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[lower]}")
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[lower], config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path)
+        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[lower], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
         config.subexperiment_executor.execute(subexperiment_config)
-        result = config.subexperiment_evaluator.execute(subexperiment_config)
-        if result==1: # successful, the upper neighbor is assumed to also has been successful
+        success = config.subexperiment_evaluator.execute(subexperiment_config,
+                                                         config.threshold)
+        if success: # successful, the upper neighbor is assumed to also has been successful
             return (lower, subexperiment_counter+1)
         else: # not successful
             return (lower+1, subexperiment_counter)
     elif lower+1==upper:
         print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[lower]}")
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[lower], config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path)
+        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[lower], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
         config.subexperiment_executor.execute(subexperiment_config)
-        result = config.subexperiment_evaluator.execute(subexperiment_config)
-        if result==1: # minimal instances found
+        success = config.subexperiment_evaluator.execute(subexperiment_config,
+                                                         config.threshold)
+        if success: # minimal instances found
             return (lower, subexperiment_counter)
         else: # not successful, check if lower+1 instances are sufficient
             print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[upper]}")
-            subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[upper], config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path)
+            subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[upper], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
             config.subexperiment_executor.execute(subexperiment_config)
-            result = config.subexperiment_evaluator.execute(subexperiment_config)
-            if result == 1: # minimal instances found
+            success = config.subexperiment_evaluator.execute(subexperiment_config,
+                                                             config.threshold)
+            if success: # minimal instances found
                 return (upper, subexperiment_counter)
             else:
                 return (upper+1, subexperiment_counter)
@@ -32,10 +35,11 @@ def binary_search(config, dim_value, lower, upper, subexperiment_counter):
         # test mid
         mid=(upper+lower)//2
         print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[mid]}")
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[mid], config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path)
+        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[mid], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
         config.subexperiment_executor.execute(subexperiment_config)
-        result = config.subexperiment_evaluator.execute(subexperiment_config)
-        if result == 1: # success -> search in (lower, mid-1)
+        success = config.subexperiment_evaluator.execute(subexperiment_config,
+                                                         config.threshold)
+        if success: # success -> search in (lower, mid-1)
             return binary_search(config, dim_value, lower, mid-1, subexperiment_counter+1)
         else: # not success -> search in (mid+1, upper)
             return binary_search(config, dim_value, mid+1, upper, subexperiment_counter+1)
diff --git a/execution/strategies/strategies/search/check_all_strategy.py b/execution/strategies/strategies/search/check_all_strategy.py
index 7d8ea605707131d19a023671a77b8f22647d6f51..0861945113b829fa79317d8a1a6312b4d6e4f71d 100644
--- a/execution/strategies/strategies/search/check_all_strategy.py
+++ b/execution/strategies/strategies/search/check_all_strategy.py
@@ -2,23 +2,30 @@
 import os
 from strategies.strategies.config import SubexperimentConfig
 
+
 def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_counter):
-    new_lower_replicas_bound_index=lower_replicas_bound_index
-    new_lower_replicas_bound_found=False
-    subexperiments_total=len(config.dim_values)*len(config.replicass)
+    new_lower_replicas_bound_index = lower_replicas_bound_index
+    new_lower_replicas_bound_found = False
+    subexperiments_total = len(config.dim_values) * len(config.replicass)
     while lower_replicas_bound_index < len(config.replicass):
-        subexperiment_counter+=1
-        dim_value=config.dim_values[dim_value_index]
-        replicas=config.replicass[lower_replicas_bound_index]
-        print(f"Run subexperiment {subexperiment_counter} of {subexperiments_total} with dimension value {dim_value} and {replicas} replicas.")
+        subexperiment_counter += 1
+        dim_value = config.dim_values[dim_value_index]
+        replicas = config.replicass[lower_replicas_bound_index]
+        print(
+            f"Run subexperiment {subexperiment_counter} of {subexperiments_total} with dimension value {dim_value} and {replicas} replicas.")
 
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, replicas, config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path)
+        subexperiment_config = SubexperimentConfig(
+            config.use_case, config.exp_id, subexperiment_counter, dim_value,
+            replicas, config.partitions, config.cpu_limit, config.memory_limit,
+            config.execution_minutes, config.prometheus_base_url, config.reset,
+            config.namespace, config.result_path, config.configurations)
 
         config.subexperiment_executor.execute(subexperiment_config)
 
-        result = config.subexperiment_evaluator.execute(subexperiment_config) == 1
-        if result == 1 and not new_lower_replicas_bound_found:
+        success = config.subexperiment_evaluator.execute(subexperiment_config,
+                                                         config.threshold)
+        if success and not new_lower_replicas_bound_found:
             new_lower_replicas_bound_found = True
             new_lower_replicas_bound_index = lower_replicas_bound_index
-        lower_replicas_bound_index+=1
+        lower_replicas_bound_index += 1
     return (new_lower_replicas_bound_index, subexperiment_counter)
diff --git a/execution/strategies/strategies/search/linear_search_strategy.py b/execution/strategies/strategies/search/linear_search_strategy.py
index c4f57c0d9bd82467a5917bbf95fe330c7bd81a58..8e777303742e54cf2a11a1bde60e95b8aa85489d 100644
--- a/execution/strategies/strategies/search/linear_search_strategy.py
+++ b/execution/strategies/strategies/search/linear_search_strategy.py
@@ -11,11 +11,12 @@ def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_c
         replicas=config.replicass[lower_replicas_bound_index]
         print(f"Run subexperiment {subexperiment_counter} from at most {subexperiments_total} with dimension value {dim_value} and {replicas} replicas.")
 
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, replicas, config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path)
+        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, replicas, config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
 
         config.subexperiment_executor.execute(subexperiment_config)
-        result = config.subexperiment_evaluator.execute(subexperiment_config)
-        if result == 1:
+        success = config.subexperiment_evaluator.execute(subexperiment_config,
+                                                         config.threshold)
+        if success:
             return (lower_replicas_bound_index, subexperiment_counter)
         else:
             lower_replicas_bound_index+=1
diff --git a/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py b/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
index 4e46d2d6ccabb601d9df373a540d23e73d60be28..30188de837746b76113ec635ca77fadc3a91cb92 100644
--- a/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
+++ b/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
@@ -1,17 +1,29 @@
-import os
-import sys
-import os
 import lib.trend_slope_computer as trend_slope_computer
+import logging
+import os
 
-THRESHOLD = 2000
 WARMUP_SEC = 60
 
-def execute(config):
+def execute(config, threshold):
+    """
+    Check whether the trend slope of the total lag of the subexperiment
+    comes below the threshold.
+
+    :param config: Configuration of the subexperiment.
+    :param threshold: The threshold the trend slope needs to come below.
+    """
     cwd = f'{os.getcwd()}/{config.result_path}'
     file = f"exp{config.exp_id}_uc{config.use_case}_{config.dim_value}_{config.replicas}_totallag.csv"
 
-    trend_slope = trend_slope_computer.compute(cwd, file, WARMUP_SEC, THRESHOLD)
+    try:
+        trend_slope = trend_slope_computer.compute(cwd, file, WARMUP_SEC)
+    except Exception as e:
+        err_msg = 'Computing trend slope failed'
+        print(err_msg)
+        logging.exception(err_msg)
+        print('Mark this subexperiment as not successful and continue benchmark')
+        return False
 
     print(f"Trend Slope: {trend_slope}")
-    success = 0 if trend_slope > THRESHOLD else 1
-    return success
+
+    return trend_slope < threshold
diff --git a/execution/strategies/subexperiment_execution/subexperiment_executor.py b/execution/strategies/subexperiment_execution/subexperiment_executor.py
index 3f7af08b7a52d70609f000a34a47c088574ddfd6..6931dacfc72081cbe112c4d6d1003703ba42c526 100644
--- a/execution/strategies/subexperiment_execution/subexperiment_executor.py
+++ b/execution/strategies/subexperiment_execution/subexperiment_executor.py
@@ -12,9 +12,9 @@ def execute(subexperiment_config):
         partitions=subexperiment_config.partitions,
         cpu_limit=subexperiment_config.cpu_limit,
         memory_limit=subexperiment_config.memory_limit,
-        commit_interval_ms=subexperiment_config.kafka_streams_commit_interval_ms,
         execution_minutes=int(subexperiment_config.execution_minutes),
         prometheus_base_url=subexperiment_config.prometheus_base_url,
         reset=subexperiment_config.reset,
         ns=subexperiment_config.namespace,
-        result_path=subexperiment_config.result_path)
+        result_path=subexperiment_config.result_path,
+        configurations=subexperiment_config.configurations)
diff --git a/execution/theodolite.py b/execution/theodolite.py
index 22be2f69ab81d81b7aac7717041604cd368e771f..bd273c4405e2a406b5b5537e084957625c19aa96 100755
--- a/execution/theodolite.py
+++ b/execution/theodolite.py
@@ -4,6 +4,7 @@ import argparse
 from lib.cli_parser import benchmark_parser
 import logging  # logging
 import os
+import run_uc
 import sys
 from strategies.config import ExperimentConfig
 import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
@@ -22,7 +23,7 @@ def load_variables():
     parser = benchmark_parser("Run theodolite benchmarking")
     args = parser.parse_args()
     print(args)
-    if args.uc is None or args.loads is None or args.instances_list is None:
+    if (args.uc is None or args.loads is None or args.instances_list is None) and not args.reset_only:
         print('The options --uc, --loads and --instances are mandatory.')
         print('Some might not be set!')
         sys.exit(1)
@@ -30,10 +31,11 @@ def load_variables():
 
 
 def main(uc, loads, instances_list, partitions, cpu_limit, memory_limit,
-         commit_ms, duration, domain_restriction, search_strategy,
-         prometheus_base_url, reset, namespace, result_path):
+         duration, domain_restriction, search_strategy, threshold,
+         prometheus_base_url, reset, namespace, result_path, configurations):
 
-    print(f"Domain restriction of search space activated: {domain_restriction}")
+    print(
+        f"Domain restriction of search space activated: {domain_restriction}")
     print(f"Chosen search strategy: {search_strategy}")
 
     counter_path = f"{result_path}/exp_counter.txt"
@@ -49,17 +51,17 @@ def main(uc, loads, instances_list, partitions, cpu_limit, memory_limit,
     # Store metadata
     separator = ","
     lines = [
-            f"UC={uc}\n",
-            f"DIM_VALUES={separator.join(map(str, loads))}\n",
-            f"REPLICAS={separator.join(map(str, instances_list))}\n",
-            f"PARTITIONS={partitions}\n",
-            f"CPU_LIMIT={cpu_limit}\n",
-            f"MEMORY_LIMIT={memory_limit}\n",
-            f"KAFKA_STREAMS_COMMIT_INTERVAL_MS={commit_ms}\n",
-            f"EXECUTION_MINUTES={duration}\n",
-            f"DOMAIN_RESTRICTION={domain_restriction}\n",
-            f"SEARCH_STRATEGY={search_strategy}"
-            ]
+        f'UC={uc}\n',
+        f'DIM_VALUES={separator.join(map(str, loads))}\n',
+        f'REPLICAS={separator.join(map(str, instances_list))}\n',
+        f'PARTITIONS={partitions}\n',
+        f'CPU_LIMIT={cpu_limit}\n',
+        f'MEMORY_LIMIT={memory_limit}\n',
+        f'EXECUTION_MINUTES={duration}\n',
+        f'DOMAIN_RESTRICTION={domain_restriction}\n',
+        f'SEARCH_STRATEGY={search_strategy}\n',
+        f'CONFIGURATIONS={configurations}'
+    ]
     with open(f"{result_path}/exp{exp_id}_uc{uc}_meta.txt", "w") as stream:
         stream.writelines(lines)
 
@@ -67,7 +69,7 @@ def main(uc, loads, instances_list, partitions, cpu_limit, memory_limit,
         write_stream.write(str(exp_id + 1))
 
     domain_restriction_strategy = None
-    search_strategy = None
+    search_strategy_method = None
 
     # Select domain restriction
     if domain_restriction:
@@ -79,13 +81,15 @@ def main(uc, loads, instances_list, partitions, cpu_limit, memory_limit,
 
     # select search strategy
     if search_strategy == "linear-search":
-        print(f"Going to execute at most {len(loads)+len(instances_list)-1} subexperiments in total..")
-        search_strategy = linear_search_strategy
+        print(
+            f"Going to execute at most {len(loads)+len(instances_list)-1} subexperiments in total..")
+        search_strategy_method = linear_search_strategy
     elif search_strategy == "binary-search":
-        search_strategy = binary_search_strategy
+        search_strategy_method = binary_search_strategy
     else:
-        print(f"Going to execute {len(loads)*len(instances_list)} subexperiments in total..")
-        search_strategy = check_all_strategy
+        print(
+            f"Going to execute {len(loads)*len(instances_list)} subexperiments in total..")
+        search_strategy_method = check_all_strategy
 
     experiment_config = ExperimentConfig(
         use_case=uc,
@@ -95,14 +99,15 @@ def main(uc, loads, instances_list, partitions, cpu_limit, memory_limit,
         partitions=partitions,
         cpu_limit=cpu_limit,
         memory_limit=memory_limit,
-        kafka_streams_commit_interval_ms=commit_ms,
         execution_minutes=duration,
         prometheus_base_url=prometheus_base_url,
         reset=reset,
         namespace=namespace,
+        configurations=configurations,
         result_path=result_path,
         domain_restriction_strategy=domain_restriction_strategy,
-        search_strategy=search_strategy,
+        search_strategy=search_strategy_method,
+        threshold=threshold,
         subexperiment_executor=subexperiment_executor,
         subexperiment_evaluator=subexperiment_evaluator)
 
@@ -113,7 +118,13 @@ def main(uc, loads, instances_list, partitions, cpu_limit, memory_limit,
 if __name__ == '__main__':
     logging.basicConfig(level=logging.INFO)
     args = load_variables()
-    main(args.uc, args.loads, args.instances_list, args.partitions, args.cpu_limit,
-         args.memory_limit, args.commit_ms, args.duration,
-         args.domain_restriction, args.search_strategy, args.prometheus,
-         args.reset, args.namespace, args.path)
+    if args.reset_only:
+        print('Only reset the cluster')
+        run_uc.main(None, None, None, None, None, None, None, None, None,
+                    None, args.namespace, None, None, reset_only=True)
+    else:
+        main(args.uc, args.loads, args.instances_list, args.partitions,
+             args.cpu_limit, args.memory_limit, args.duration,
+             args.domain_restriction, args.search_strategy,
+             args.threshold, args.prometheus, args.reset, args.namespace,
+             args.path, args.configurations)
diff --git a/execution/theodolite.yaml b/execution/theodolite.yaml
index 1c1ba6a1f3d9119dddd4668c27e1b1a10291895e..68d53386bcf5e77ce08d964f3c04eb000794575c 100644
--- a/execution/theodolite.yaml
+++ b/execution/theodolite.yaml
@@ -14,65 +14,38 @@ spec:
           image: bvonheid/theodolite:latest
           # imagePullPolicy: Never # Used to pull "own" local image
           env:
-            - name: UC
+            - name: UC # mandatory
               value: "1"
-            - name: LOADS
-              value: "13206, 19635"
-            - name: INSTANCES
-              value: "1, 2"
-            - name: DURATION
-              value: "3"
-            - name: PARTITIONS
-              value: "30"
-            # - name: COMMIT_MS
-            #   value: ""
+            - name: LOADS # mandatory
+              value: "100000, 200000"
+            - name: INSTANCES # mandatory
+              value: "1, 2, 3"
+            # - name: DURATION
+            #   value: "5"
+            # - name: PARTITIONS
+            #   value: "40"
+            # - name: DOMAIN_RESTRICTION
+            #   value: "True"
             # - name: SEARCH_STRATEGY
-            #   value: ""
+            #   value: "linear-search"
             # - name: CPU_LIMIT
-            #   value: ""
+            #   value: "1000m"
             # - name: MEMORY_LIMIT
-            #   value: ""
+            #   value: "4Gi"
             - name: PROMETHEUS_BASE_URL
               value: "http://prometheus-operated:9090"
             # - name: NAMESPACE
             #   value: "default"
+            # - name: CONFIGURATIONS
+            #   value: "COMMIT_INTERVAL_MS=100, NUM_STREAM_THREADS=1"
             - name: RESULT_PATH
               value: "results"
-            - name: PYTHONUNBUFFERED
+            - name: PYTHONUNBUFFERED # Enable logs in Kubernetes
               value: "1"
           volumeMounts:
             - mountPath: "/app/results"
               name: theodolite-pv-storage
       restartPolicy: Never
+      # Uncomment if RBAC is enabled and configured
+      # serviceAccountName: theodolite
   backoffLimit: 4
-
-# ---
-# apiVersion: v1
-# kind: ServiceAccount
-# metadata:
-#   name: theodolite
-# ---
-# apiVersion: rbac.authorization.k8s.io/v1
-# kind: Role
-# metadata:
-#   name: modify-pods
-# rules:
-#   - apiGroups: [""]
-#     resources:
-#       - pods
-#     verbs:
-#       - get
-#       - list
-#       - delete
-# ---
-# apiVersion: rbac.authorization.k8s.io/v1
-# kind: RoleBinding
-# metadata:
-#   name: modify-pods-to-sa
-# subjects:
-#   - kind: ServiceAccount
-#     name: theodolite
-# roleRef:
-#   kind: Role
-#   name: modify-pods
-#   apiGroup: rbac.authorization.k8s.io
diff --git a/execution/uc-application/base/aggregation-deployment.yaml b/execution/uc-application/base/aggregation-deployment.yaml
index 81da3eea7688f5d3b3145092d91cb8502e6ad87b..07732ca1dd1e6b2b06f098dfb10a53d38e8d5cae 100644
--- a/execution/uc-application/base/aggregation-deployment.yaml
+++ b/execution/uc-application/base/aggregation-deployment.yaml
@@ -20,14 +20,14 @@ spec:
         - containerPort: 5555
           name: jmx
         env:
-        - name: COMMIT_INTERVAL_MS
-          value: "100"
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
         - name: SCHEMA_REGISTRY_URL
           value: "http://my-confluent-cp-schema-registry:8081"
         - name: JAVA_OPTS
           value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+        - name: COMMIT_INTERVAL_MS # Set as default for the applications
+          value: "100"
         resources:
           limits:
             memory: 4Gi
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
deleted file mode 100644
index 29953ea141f55e3b8fc691d31b5ca8816d89fa87..0000000000000000000000000000000000000000
Binary files a/gradle/wrapper/gradle-wrapper.jar and /dev/null differ
diff --git a/uc3-application/src/main/java/theodolite/uc3/application/ConfigurationKeys.java b/uc3-application/src/main/java/theodolite/uc3/application/ConfigurationKeys.java
deleted file mode 100644
index ab6f08c017bb78a72c4896d766b38f7b8485c7fb..0000000000000000000000000000000000000000
--- a/uc3-application/src/main/java/theodolite/uc3/application/ConfigurationKeys.java
+++ /dev/null
@@ -1,29 +0,0 @@
-package theodolite.uc3.application;
-
-/**
- * Keys to access configuration parameters.
- */
-public final class ConfigurationKeys {
-
-  public static final String APPLICATION_NAME = "application.name";
-
-  public static final String APPLICATION_VERSION = "application.version";
-
-  public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
-
-  public static final String KAFKA_OUTPUT_TOPIC = "kafka.output.topic";
-
-  public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
-
-  public static final String NUM_THREADS = "num.threads";
-
-  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
-
-  public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
-
-  public static final String KAFKA_WINDOW_DURATION_MINUTES = "kafka.window.duration.minutes";
-
-  private ConfigurationKeys() {
-  }
-
-}