diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 05ea0e7d9821988ca034a1fa8f28c4ec1a224983..a8bf42eddaa0b4896d853b7935040774f300353b 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,244 +1,21 @@
-image: openjdk:11-jdk
-
-# Disable the Gradle daemon for Continuous Integration servers as correctness
-# is usually a priority over speed in CI environments. Using a fresh
-# runtime for each build is more reliable since the runtime is completely
-# isolated from any previous builds.
-variables:
-  GRADLE_OPTS: "-Dorg.gradle.daemon=false"
-
-cache:
-  paths:
-    - .gradle
-
-before_script:
-  - export GRADLE_USER_HOME=`pwd`/.gradle
+workflow:
+  rules:
+    - if: $CI_MERGE_REQUEST_ID
+      when: never
+    - when: always
 
 stages:
-  - build
-  - test
-  - check
-  - deploy
-
-build:
-  stage: build
-  tags:
-    - exec-docker
-  script: ./gradlew --build-cache assemble
-  artifacts:
-    paths:
-      - "build/libs/*.jar"
-      - "*/build/distributions/*.tar"
-    expire_in: 1 day
-
-test:
-  stage: test
-  tags:
-    - exec-docker
-  script: ./gradlew test --continue
-  artifacts:
-    reports:
-      junit:
-        - "**/build/test-results/test/TEST-*.xml"
-
-checkstyle:
-  stage: check
-  tags:
-    - exec-docker
-  script: ./gradlew checkstyle --continue
-  artifacts:
-    paths:
-      - "*/build/reports/checkstyle/main.html"
-    when: on_failure
-    expire_in: 1 day
-
-pmd:
-  stage: check
-  tags:
-    - exec-docker
-  script: ./gradlew pmd --continue
-  artifacts:
-    paths:
-      - "*/build/reports/pmd/*.html"
-    when: on_failure
-    expire_in: 1 day
-
-spotbugs:
-  stage: check
-  tags:
-    - exec-docker
-  script: ./gradlew spotbugs --continue
-  artifacts:
-    paths:
-      - "*/build/reports/spotbugs/*.html"
-    when: on_failure
-    expire_in: 1 day
+  - triggers
 
-
-.deploy:
-  stage: deploy
-  tags:
-    - exec-dind
-  # see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled
-  # for image usage and settings for building with TLS and docker in docker
-  image: docker:19.03.1
-  services:
-    - docker:19.03.1-dind
-  variables:
-    DOCKER_TLS_CERTDIR: "/certs"
-  script:
-    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
-    - docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME
-    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest"
-    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
-    - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$CI_COMMIT_TAG"
-    - echo $DOCKERHUB_PW | docker login -u $DOCKERHUB_ID --password-stdin
-    - docker push $DOCKERHUB_ORG/$IMAGE_NAME
-    - docker logout
+benchmarks:
+  stage: triggers
+  trigger:
+    include: benchmarks/.gitlab-ci.yml
+    strategy: depend
   rules:
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      # - $JAVA_PROJECT_NAME/**/* # hope this can be simplified soon, see #51
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc1-kstreams-app:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc1-kstreams-app"
-    JAVA_PROJECT_NAME: "uc1-application"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc1-application/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc2-kstreams-app:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc2-kstreams-app"
-    JAVA_PROJECT_NAME: "uc2-application"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc2-application/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc3-kstreams-app:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc3-kstreams-app"
-    JAVA_PROJECT_NAME: "uc3-application"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc3-application/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc4-kstreams-app:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc4-kstreams-app"
-    JAVA_PROJECT_NAME: "uc4-application"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc4-application/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc1-workload-generator:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc1-workload-generator"
-    JAVA_PROJECT_NAME: "uc1-workload-generator"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc1-workload-generator/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc2-workload-generator:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc2-workload-generator"
-    JAVA_PROJECT_NAME: "uc2-workload-generator"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc2-workload-generator/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc3-workload-generator:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc3-workload-generator"
-    JAVA_PROJECT_NAME: "uc3-workload-generator"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
-    - changes:
-      - uc3-workload-generator/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
-      allow_failure: true
-
-deploy-uc4-workload-generator:
-  extends: .deploy
-  variables:
-    IMAGE_NAME: "theodolite-uc4-workload-generator"
-    JAVA_PROJECT_NAME: "uc4-workload-generator"
-  rules: # hope this can be simplified soon, see #51
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
-      when: always
+    - if: "$CI_COMMIT_TAG"
     - changes:
-      - uc4-workload-generator/**/*
-      - application-kafkastreams-commons/**/*
-      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: always
-    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
-      when: manual
+      - benchmarks/**/*
+    - when: manual
       allow_failure: true
+
\ No newline at end of file
diff --git a/README.md b/README.md
index a9de5f63d46019d8c4a0c0c0a880658e0f321a48..8969a283dbcd252ba0901709ec1de7b6726dda9e 100644
--- a/README.md
+++ b/README.md
@@ -11,9 +11,9 @@ Theodolite contains 4 application benchmarks, which are based on typical use cas
 
 ## Theodolite Execution Framework
 
-Theodolite aims to benchmark scalability of stream processing engines for real use cases. Microservices that apply stream processing techniques are usually deployed in elastic cloud environments. Hence, Theodolite's cloud-native benchmarking framework deploys as components in a cloud environment, orchestrated by Kubernetes. More information on how to execute scalability benchmarks can be found in [Thedolite execution framework](execution).
+Theodolite aims to benchmark scalability of stream processing engines for real use cases. Microservices that apply stream processing techniques are usually deployed in elastic cloud environments. Hence, Theodolite's cloud-native benchmarking framework deploys its components in a cloud environment, orchestrated by Kubernetes. More information on how to execute scalability benchmarks can be found in [Theodolite execution framework](execution).
 
 
 ## Theodolite Analysis Tools
 
-Theodolite's benchmarking method create a *scalability graph* allowing to draw conclusions about the scalability of a stream processing engine or its deployment. A scalability graph shows how resource demand evolves with an increasing workload. Theodolite provides Jupyter notebooks for creating such scalability graphs based on benchmarking results from the execution framework. More information can be found in [Theodolite analysis tool](analysis).
+Theodolite's benchmarking method creates a *scalability graph* allowing one to draw conclusions about the scalability of a stream processing engine or its deployment. A scalability graph shows how resource demand evolves with an increasing workload. Theodolite provides Jupyter notebooks for creating such scalability graphs based on benchmarking results from the execution framework. More information can be found in [Theodolite analysis tool](analysis).
diff --git a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java b/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
deleted file mode 100644
index 8c758c24444ea9c590c364063a397f9b7bfec8f9..0000000000000000000000000000000000000000
--- a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
+++ /dev/null
@@ -1,156 +0,0 @@
-package theodolite.commons.kafkastreams;
-
-import java.util.Objects;
-import java.util.Properties;
-import org.apache.kafka.streams.KafkaStreams;
-import org.apache.kafka.streams.StreamsConfig;
-import org.apache.kafka.streams.Topology;
-import titan.ccp.common.kafka.streams.PropertiesBuilder;
-
-/**
- * Builder for the Kafka Streams configuration.
- */
-public abstract class KafkaStreamsBuilder {
-
-  // Kafkastreams application specific
-  protected String schemaRegistryUrl; // NOPMD for use in subclass
-
-  private String applicationName; // NOPMD
-  private String applicationVersion; // NOPMD
-  private String bootstrapServers; // NOPMD
-  private int numThreads = -1; // NOPMD
-  private int commitIntervalMs = -1; // NOPMD
-  private int cacheMaxBytesBuff = -1; // NOPMD
-
-  /**
-   * Sets the application name for the {@code KafkaStreams} application. It is used to create the
-   * application ID.
-   *
-   * @param applicationName Name of the application.
-   * @return
-   */
-  public KafkaStreamsBuilder applicationName(final String applicationName) {
-    this.applicationName = applicationName;
-    return this;
-  }
-
-  /**
-   * Sets the application version for the {@code KafkaStreams} application. It is used to create the
-   * application ID.
-   *
-   * @param applicationVersion Version of the application.
-   * @return
-   */
-  public KafkaStreamsBuilder applicationVersion(final String applicationVersion) {
-    this.applicationVersion = applicationVersion;
-    return this;
-  }
-
-  /**
-   * Sets the bootstrap servers for the {@code KafkaStreams} application.
-   *
-   * @param bootstrapServers String for a bootstrap server.
-   * @return
-   */
-  public KafkaStreamsBuilder bootstrapServers(final String bootstrapServers) {
-    this.bootstrapServers = bootstrapServers;
-    return this;
-  }
-
-  /**
-   * Sets the URL for the schema registry.
-   *
-   * @param url The URL of the schema registry.
-   * @return
-   */
-  public KafkaStreamsBuilder schemaRegistry(final String url) {
-    this.schemaRegistryUrl = url;
-    return this;
-  }
-
-  /**
-   * Sets the Kafka Streams property for the number of threads (num.stream.threads). Can be minus
-   * one for using the default.
-   *
-   * @param numThreads Number of threads. -1 for using the default.
-   * @return
-   */
-  public KafkaStreamsBuilder numThreads(final int numThreads) {
-    if (numThreads < -1 || numThreads == 0) {
-      throw new IllegalArgumentException("Number of threads must be greater 0 or -1.");
-    }
-    this.numThreads = numThreads;
-    return this;
-  }
-
-  /**
-   * Sets the Kafka Streams property for the frequency with which to save the position (offsets in
-   * source topics) of tasks (commit.interval.ms). Must be zero for processing all record, for
-   * example, when processing bulks of records. Can be minus one for using the default.
-   *
-   * @param commitIntervalMs Frequency with which to save the position of tasks. In ms, -1 for using
-   *        the default.
-   * @return
-   */
-  public KafkaStreamsBuilder commitIntervalMs(final int commitIntervalMs) {
-    if (commitIntervalMs < -1) {
-      throw new IllegalArgumentException("Commit interval must be greater or equal -1.");
-    }
-    this.commitIntervalMs = commitIntervalMs;
-    return this;
-  }
-
-  /**
-   * Sets the Kafka Streams property for maximum number of memory bytes to be used for record caches
-   * across all threads (cache.max.bytes.buffering). Must be zero for processing all record, for
-   * example, when processing bulks of records. Can be minus one for using the default.
-   *
-   * @param cacheMaxBytesBuffering Number of memory bytes to be used for record caches across all
-   *        threads. -1 for using the default.
-   * @return
-   */
-  public KafkaStreamsBuilder cacheMaxBytesBuffering(final int cacheMaxBytesBuffering) {
-    if (cacheMaxBytesBuffering < -1) {
-      throw new IllegalArgumentException("Cache max bytes buffering must be greater or equal -1.");
-    }
-    this.cacheMaxBytesBuff = cacheMaxBytesBuffering;
-    return this;
-  }
-
-  /**
-   * Method to implement a {@link Topology} for a {@code KafkaStreams} application.
-   *
-   * @return A {@code Topology} for a {@code KafkaStreams} application.
-   */
-  protected abstract Topology buildTopology();
-
-  /**
-   * Build the {@link Properties} for a {@code KafkaStreams} application.
-   *
-   * @return A {@code Properties} object.
-   */
-  protected Properties buildProperties() {
-    return PropertiesBuilder
-        .bootstrapServers(this.bootstrapServers)
-        .applicationId(this.applicationName + '-' + this.applicationVersion)
-        .set(StreamsConfig.NUM_STREAM_THREADS_CONFIG, this.numThreads, p -> p > 0)
-        .set(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, this.commitIntervalMs, p -> p >= 0)
-        .set(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, this.cacheMaxBytesBuff, p -> p >= 0)
-        .build();
-  }
-
-  /**
-   * Builds the {@link KafkaStreams} instance.
-   */
-  public KafkaStreams build() {
-    // Check for required attributes for building properties.
-    Objects.requireNonNull(this.applicationName, "Application name has not been set.");
-    Objects.requireNonNull(this.applicationVersion, "Application version has not been set.");
-    Objects.requireNonNull(this.bootstrapServers, "Bootstrap server has not been set.");
-    Objects.requireNonNull(this.schemaRegistryUrl, "Schema registry has not been set.");
-
-    // Create the Kafka streams instance.
-    return new KafkaStreams(this.buildTopology(), this.buildProperties());
-  }
-
-}
diff --git a/application-kafkastreams-commons/src/test/java/.gitkeep b/application-kafkastreams-commons/src/test/java/.gitkeep
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/benchmarks/.gitlab-ci.yml b/benchmarks/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1a4d354701459d4730dab398e0210ab9189d7ad3
--- /dev/null
+++ b/benchmarks/.gitlab-ci.yml
@@ -0,0 +1,414 @@
+image: openjdk:11-jdk
+
+# Disable the Gradle daemon for Continuous Integration servers as correctness
+# is usually a priority over speed in CI environments. Using a fresh
+# runtime for each build is more reliable since the runtime is completely
+# isolated from any previous builds.
+variables:
+  GRADLE_OPTS: "-Dorg.gradle.daemon=false"
+
+cache:
+  paths:
+    - .gradle
+
+before_script:
+  - cd benchmarks
+  - export GRADLE_USER_HOME=`pwd`/.gradle
+
+stages:
+  - build
+  - test
+  - check
+  - deploy
+
+build:
+  stage: build
+  tags:
+    - exec-docker
+  script: ./gradlew --build-cache assemble
+  artifacts:
+    paths:
+      - "benchmarks/build/libs/*.jar"
+      - "benchmarks/*/build/distributions/*.tar"
+    expire_in: 1 day
+
+test:
+  stage: test
+  tags:
+    - exec-docker
+  script: ./gradlew test --continue
+  artifacts:
+    reports:
+      junit:
+        - "benchmarks/**/build/test-results/test/TEST-*.xml"
+
+checkstyle:
+  stage: check
+  tags:
+    - exec-docker
+  script: ./gradlew checkstyle --continue
+  artifacts:
+    paths:
+      - "benchmarks/*/build/reports/checkstyle/main.html"
+    when: on_failure
+    expire_in: 1 day
+
+pmd:
+  stage: check
+  tags:
+    - exec-docker
+  script: ./gradlew pmd --continue
+  artifacts:
+    paths:
+      - "benchmarks/*/build/reports/pmd/*.html"
+    when: on_failure
+    expire_in: 1 day
+
+spotbugs:
+  stage: check
+  tags:
+    - exec-docker
+  script: ./gradlew spotbugs --continue
+  artifacts:
+    paths:
+      - "benchmarks/*/build/reports/spotbugs/*.html"
+    when: on_failure
+    expire_in: 1 day
+
+
+.deploy:
+  stage: deploy
+  tags:
+    - exec-dind
+  # see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled
+  # for image usage and settings for building with TLS and docker in docker
+  image: docker:19.03.1
+  services:
+    - docker:19.03.1-dind
+  variables:
+    DOCKER_TLS_CERTDIR: "/certs"
+  script:
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    - docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest"
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
+    - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$CI_COMMIT_TAG"
+    - echo $DOCKERHUB_PW | docker login -u $DOCKERHUB_ID --password-stdin
+    - docker push $DOCKERHUB_ORG/$IMAGE_NAME
+    - docker logout
+  rules:
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      # - $JAVA_PROJECT_NAME/**/* # hope this can be simplified soon, see #51
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc1-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc1-kstreams-app"
+    JAVA_PROJECT_NAME: "uc1-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc1-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc2-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc2-kstreams-app"
+    JAVA_PROJECT_NAME: "uc2-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc2-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc3-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc3-kstreams-app"
+    JAVA_PROJECT_NAME: "uc3-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc3-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc4-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc4-kstreams-app"
+    JAVA_PROJECT_NAME: "uc4-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc4-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc1-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc1-workload-generator"
+    JAVA_PROJECT_NAME: "uc1-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc1-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc2-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc2-workload-generator"
+    JAVA_PROJECT_NAME: "uc2-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc2-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc3-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc3-workload-generator"
+    JAVA_PROJECT_NAME: "uc3-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc3-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc4-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc4-workload-generator"
+    JAVA_PROJECT_NAME: "uc4-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc4-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+.deploy-ghcr:
+  stage: deploy
+  tags:
+    - exec-dind
+  # see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled
+  # for image usage and settings for building with TLS and docker in docker
+  image: docker:19.03.1
+  services:
+    - docker:19.03.1-dind
+  variables:
+    DOCKER_TLS_CERTDIR: "/certs"
+  script:
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    - docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest"
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
+    - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME:$CI_COMMIT_TAG"
+    - echo $GITHUB_CR_TOKEN | docker login ghcr.io -u $GITHUB_CR_USER --password-stdin
+    - docker push ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME
+    - docker logout
+  rules:
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      # - $JAVA_PROJECT_NAME/**/* # hope this can be simplified soon, see #51
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc1-kstreams-app:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc1-kstreams-app"
+    JAVA_PROJECT_NAME: "uc1-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc1-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc2-kstreams-app:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc2-kstreams-app"
+    JAVA_PROJECT_NAME: "uc2-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc2-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc3-kstreams-app:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc3-kstreams-app"
+    JAVA_PROJECT_NAME: "uc3-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc3-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc4-kstreams-app:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc4-kstreams-app"
+    JAVA_PROJECT_NAME: "uc4-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc4-application/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc1-workload-generator:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc1-workload-generator"
+    JAVA_PROJECT_NAME: "uc1-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc1-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc2-workload-generator:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc2-workload-generator"
+    JAVA_PROJECT_NAME: "uc2-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc2-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc3-workload-generator:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc3-workload-generator"
+    JAVA_PROJECT_NAME: "uc3-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc3-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-ghcr-uc4-workload-generator:
+  extends: .deploy-ghcr
+  variables:
+    IMAGE_NAME: "theodolite-uc4-workload-generator"
+    JAVA_PROJECT_NAME: "uc4-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - benchmarks/uc4-workload-generator/**/*
+      - benchmarks/application-kafkastreams-commons/**/*
+      if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
diff --git a/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from .settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/.settings/org.eclipse.jdt.ui.prefs
diff --git a/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from .settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from .settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/application-kafkastreams-commons/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/application-kafkastreams-commons/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from application-kafkastreams-commons/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/application-kafkastreams-commons/.settings/org.eclipse.jdt.ui.prefs
diff --git a/application-kafkastreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from application-kafkastreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/application-kafkastreams-commons/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from application-kafkastreams-commons/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/application-kafkastreams-commons/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/application-kafkastreams-commons/build.gradle b/benchmarks/application-kafkastreams-commons/build.gradle
similarity index 100%
rename from application-kafkastreams-commons/build.gradle
rename to benchmarks/application-kafkastreams-commons/build.gradle
diff --git a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java b/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
similarity index 85%
rename from application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
rename to benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
index 6302e4c69904aaf57e3f936ee9ad0ead11414a8d..ca1838b84a4f1b3ddf11ad4dea8e34792371974b 100644
--- a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
+++ b/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
@@ -9,12 +9,6 @@ public final class ConfigurationKeys {
 
   public static final String APPLICATION_VERSION = "application.version";
 
-  public static final String NUM_THREADS = "num.threads";
-
-  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
-
-  public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
-
   public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
 
   public static final String SCHEMA_REGISTRY_URL = "schema.registry.url";
diff --git a/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java b/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..ef1ece3549b1aabf60a4ff5b15028b7e50288cd9
--- /dev/null
+++ b/benchmarks/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
@@ -0,0 +1,123 @@
+package theodolite.commons.kafkastreams;
+
+import java.util.Properties;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import org.apache.commons.configuration2.Configuration;
+import org.apache.kafka.streams.KafkaStreams;
+import org.apache.kafka.streams.StreamsConfig;
+import org.apache.kafka.streams.Topology;
+import titan.ccp.common.kafka.streams.PropertiesBuilder;
+
+/**
+ * Builder for the Kafka Streams configuration.
+ */
+public abstract class KafkaStreamsBuilder {
+
+  // Kafka Streams application specific
+  protected final String schemaRegistryUrl; // NOPMD for use in subclass
+  protected final String inputTopic; // NOPMD for use in subclass
+
+  private final Configuration config;
+
+  private final String applicationName; // NOPMD
+  private final String applicationVersion; // NOPMD
+  private final String bootstrapServers; // NOPMD
+
+  /**
+   * Construct a new Builder object for a Kafka Streams application.
+   *
+   * @param config Contains the key value pairs for configuration.
+   */
+  public KafkaStreamsBuilder(final Configuration config) {
+    this.config = config;
+    this.applicationName = this.config.getString(ConfigurationKeys.APPLICATION_NAME);
+    this.applicationVersion = this.config.getString(ConfigurationKeys.APPLICATION_VERSION);
+    this.bootstrapServers = this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS);
+    this.schemaRegistryUrl = this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL);
+    this.inputTopic = this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC);
+  }
+
+  /**
+   * Checks if the given key is contained in the configuration and sets it in the properties.
+   *
+   * @param <T> Type of the value for given key
+   * @param propBuilder Object where to set this property.
+   * @param key The key to check and set the property.
+   * @param valueGetter Method to get the value for the given key.
+   * @param condition Predicate the value must satisfy for the property to be set.
+   */
+  private <T> void setOptionalProperty(final PropertiesBuilder propBuilder,
+      final String key,
+      final Function<String, T> valueGetter,
+      final Predicate<T> condition) {
+    if (this.config.containsKey(key)) {
+      final T value = valueGetter.apply(key);
+      propBuilder.set(key, value, condition);
+    }
+  }
+
+  /**
+   * Build the {@link Properties} for a {@code KafkaStreams} application.
+   *
+   * @return A {@code Properties} object.
+   */
+  protected Properties buildProperties() {
+    // required configuration
+    final PropertiesBuilder propBuilder = PropertiesBuilder
+        .bootstrapServers(this.bootstrapServers)
+        .applicationId(this.applicationName + '-' + this.applicationVersion);
+
+    // optional configurations
+    this.setOptionalProperty(propBuilder, StreamsConfig.ACCEPTABLE_RECOVERY_LAG_CONFIG,
+        this.config::getLong,
+        p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.BUFFERED_RECORDS_PER_PARTITION_CONFIG,
+        this.config::getInt, p -> p > 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG,
+        this.config::getInt,
+        p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.COMMIT_INTERVAL_MS_CONFIG,
+        this.config::getInt, p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.MAX_TASK_IDLE_MS_CONFIG,
+        this.config::getLong,
+        p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.MAX_WARMUP_REPLICAS_CONFIG,
+        this.config::getInt, p -> p >= 1);
+    this.setOptionalProperty(propBuilder, StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG,
+        this.config::getInt, p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.NUM_STREAM_THREADS_CONFIG,
+        this.config::getInt, p -> p > 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.POLL_MS_CONFIG,
+        this.config::getLong,
+        p -> p >= 0);
+    this.setOptionalProperty(propBuilder, StreamsConfig.PROCESSING_GUARANTEE_CONFIG,
+        this.config::getString, p -> StreamsConfig.AT_LEAST_ONCE.equals(p)
+            || StreamsConfig.EXACTLY_ONCE.equals(p) || StreamsConfig.EXACTLY_ONCE_BETA.equals(p));
+    this.setOptionalProperty(propBuilder, StreamsConfig.REPLICATION_FACTOR_CONFIG,
+        this.config::getInt, p -> p >= 0);
+
+    if (this.config.containsKey(StreamsConfig.TOPOLOGY_OPTIMIZATION)
+        && this.config.getBoolean(StreamsConfig.TOPOLOGY_OPTIMIZATION)) {
+      propBuilder.set(StreamsConfig.TOPOLOGY_OPTIMIZATION, StreamsConfig.OPTIMIZE);
+    }
+
+    return propBuilder.build();
+  }
+
+  /**
+   * Method to implement a {@link Topology} for a {@code KafkaStreams} application.
+   *
+   * @return A {@code Topology} for a {@code KafkaStreams} application.
+   */
+  protected abstract Topology buildTopology();
+
+  /**
+   * Builds the {@link KafkaStreams} instance.
+   */
+  public KafkaStreams build() {
+    // Create the Kafka streams instance.
+    return new KafkaStreams(this.buildTopology(), this.buildProperties());
+  }
+
+}
diff --git a/build.gradle b/benchmarks/build.gradle
similarity index 91%
rename from build.gradle
rename to benchmarks/build.gradle
index 1e388cb9665b43e004a1854248acc04e1cda387c..3cb86b68e9d37c53572c6611fad1057b5505e9cc 100644
--- a/build.gradle
+++ b/benchmarks/build.gradle
@@ -6,7 +6,7 @@ buildscript {
     }
   }
   dependencies {
-    classpath "gradle.plugin.com.github.spotbugs:spotbugs-gradle-plugin:1.6.3"
+    classpath "gradle.plugin.com.github.spotbugs.snom:spotbugs-gradle-plugin:4.6.0"
   }
 }
 
@@ -65,6 +65,7 @@ configure(useCaseApplications) {
       implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
       implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
       implementation 'org.apache.kafka:kafka-streams:2.6.0' // enable TransformerSuppliers
+      implementation 'com.google.code.gson:gson:2.8.2'
       implementation 'com.google.guava:guava:24.1-jre'
       implementation 'org.jctools:jctools-core:2.1.1'
       implementation 'org.slf4j:slf4j-simple:1.7.25'
@@ -100,6 +101,7 @@ configure(commonProjects) {
       implementation 'org.slf4j:slf4j-simple:1.7.25'
       implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
       implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
+      implementation 'org.apache.kafka:kafka-streams:2.6.0'
 
       // Use JUnit test framework
       testImplementation 'junit:junit:4.12'
@@ -108,7 +110,7 @@ configure(commonProjects) {
 
 // Per default XML reports for SpotBugs are generated
 // Include this to generate HTML reports
-tasks.withType(com.github.spotbugs.SpotBugsTask) {
+tasks.withType(com.github.spotbugs.snom.SpotBugsTask) {
   reports {
     // Either HTML or XML reports can be activated
     html.enabled true
@@ -165,7 +167,7 @@ subprojects {
     reportLevel = "low"
     effort = "max"
     ignoreFailures = false
-    toolVersion = '3.1.7'
+    toolVersion = '4.1.4'
   }
 }
 
diff --git a/config/README.md b/benchmarks/config/README.md
similarity index 100%
rename from config/README.md
rename to benchmarks/config/README.md
diff --git a/config/checkstyle-suppression.xml b/benchmarks/config/checkstyle-suppression.xml
similarity index 100%
rename from config/checkstyle-suppression.xml
rename to benchmarks/config/checkstyle-suppression.xml
diff --git a/config/checkstyle.xml b/benchmarks/config/checkstyle.xml
similarity index 100%
rename from config/checkstyle.xml
rename to benchmarks/config/checkstyle.xml
diff --git a/config/eclipse-cleanup.xml b/benchmarks/config/eclipse-cleanup.xml
similarity index 100%
rename from config/eclipse-cleanup.xml
rename to benchmarks/config/eclipse-cleanup.xml
diff --git a/config/eclipse-formatter.xml b/benchmarks/config/eclipse-formatter.xml
similarity index 100%
rename from config/eclipse-formatter.xml
rename to benchmarks/config/eclipse-formatter.xml
diff --git a/config/eclipse-import-order.importorder b/benchmarks/config/eclipse-import-order.importorder
similarity index 100%
rename from config/eclipse-import-order.importorder
rename to benchmarks/config/eclipse-import-order.importorder
diff --git a/config/pmd.xml b/benchmarks/config/pmd.xml
similarity index 100%
rename from config/pmd.xml
rename to benchmarks/config/pmd.xml
diff --git a/config/spotbugs-exclude-filter.xml b/benchmarks/config/spotbugs-exclude-filter.xml
similarity index 100%
rename from config/spotbugs-exclude-filter.xml
rename to benchmarks/config/spotbugs-exclude-filter.xml
diff --git a/benchmarks/gradle/wrapper/gradle-wrapper.jar b/benchmarks/gradle/wrapper/gradle-wrapper.jar
new file mode 100644
index 0000000000000000000000000000000000000000..457aad0d98108420a977756b7145c93c8910b076
Binary files /dev/null and b/benchmarks/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/gradle/wrapper/gradle-wrapper.properties b/benchmarks/gradle/wrapper/gradle-wrapper.properties
similarity index 91%
rename from gradle/wrapper/gradle-wrapper.properties
rename to benchmarks/gradle/wrapper/gradle-wrapper.properties
index e0b3fb8d70b1bbf790f6f8ed1c928ddf09f54628..4d9ca1649142b0c20144adce78e2472e2da01c30 100644
--- a/gradle/wrapper/gradle-wrapper.properties
+++ b/benchmarks/gradle/wrapper/gradle-wrapper.properties
@@ -1,5 +1,5 @@
 distributionBase=GRADLE_USER_HOME
 distributionPath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-4.10.2-bin.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-6.7.1-bin.zip
 zipStoreBase=GRADLE_USER_HOME
 zipStorePath=wrapper/dists
diff --git a/gradlew b/benchmarks/gradlew
similarity index 99%
rename from gradlew
rename to benchmarks/gradlew
index cccdd3d517fc5249beaefa600691cf150f2fa3e6..af6708ff229fda75da4f7cc4da4747217bac4d53 100755
--- a/gradlew
+++ b/benchmarks/gradlew
@@ -28,7 +28,7 @@ APP_NAME="Gradle"
 APP_BASE_NAME=`basename "$0"`
 
 # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
-DEFAULT_JVM_OPTS=""
+DEFAULT_JVM_OPTS='"-Xmx64m"'
 
 # Use the maximum available, or set MAX_FD != -1 to use that value.
 MAX_FD="maximum"
diff --git a/gradlew.bat b/benchmarks/gradlew.bat
similarity index 94%
rename from gradlew.bat
rename to benchmarks/gradlew.bat
index e95643d6a2ca62258464e83c72f5156dc941c609..0f8d5937c4ad18feb44a19e55ad1e37cc159260f 100644
--- a/gradlew.bat
+++ b/benchmarks/gradlew.bat
@@ -14,7 +14,7 @@ set APP_BASE_NAME=%~n0
 set APP_HOME=%DIRNAME%
 
 @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
-set DEFAULT_JVM_OPTS=
+set DEFAULT_JVM_OPTS="-Xmx64m"
 
 @rem Find java.exe
 if defined JAVA_HOME goto findJavaFromJavaHome
diff --git a/settings.gradle b/benchmarks/settings.gradle
similarity index 100%
rename from settings.gradle
rename to benchmarks/settings.gradle
diff --git a/uc1-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc1-application/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc1-application/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc1-application/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc1-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc1-application/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc1-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc1-application/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc1-application/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc1-application/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc1-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc1-application/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc1-application/Dockerfile b/benchmarks/uc1-application/Dockerfile
similarity index 100%
rename from uc1-application/Dockerfile
rename to benchmarks/uc1-application/Dockerfile
diff --git a/uc1-application/build.gradle b/benchmarks/uc1-application/build.gradle
similarity index 100%
rename from uc1-application/build.gradle
rename to benchmarks/uc1-application/build.gradle
diff --git a/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java b/benchmarks/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
similarity index 56%
rename from uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
rename to benchmarks/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
index a35cc37b36fb906e5c5495006126374d4de4656c..f0d8062a2442181507c0bef990b73e0e9cf4a372 100644
--- a/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
+++ b/benchmarks/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
@@ -3,7 +3,6 @@ package theodolite.uc1.application;
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
-import theodolite.commons.kafkastreams.ConfigurationKeys;
 import theodolite.uc1.streamprocessing.Uc1KafkaStreamsBuilder;
 import titan.ccp.common.configuration.ServiceConfigurations;
 
@@ -31,18 +30,9 @@ public class HistoryService {
    */
   private void createKafkaStreamsApplication() {
 
-    final Uc1KafkaStreamsBuilder uc1KafkaStreamsBuilder = new Uc1KafkaStreamsBuilder();
-    uc1KafkaStreamsBuilder.inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC));
-
-    final KafkaStreams kafkaStreams = uc1KafkaStreamsBuilder
-        .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
-        .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
-        .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
-        .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
-        .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
-        .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
-        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
-        .build();
+    final Uc1KafkaStreamsBuilder uc1KafkaStreamsBuilder = new Uc1KafkaStreamsBuilder(this.config);
+
+    final KafkaStreams kafkaStreams = uc1KafkaStreamsBuilder.build();
 
     this.stopEvent.thenRun(kafkaStreams::close);
 
diff --git a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java b/benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
similarity index 100%
rename from uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
rename to benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
diff --git a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java b/benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
similarity index 80%
rename from uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
rename to benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
index 7699ecb48369a2041777b901931c46072a10d99f..14335282863bff5a170716b228ea363e3d739685 100644
--- a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
+++ b/benchmarks/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
@@ -1,6 +1,7 @@
 package theodolite.uc1.streamprocessing;
 
 import java.util.Objects;
+import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
 import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
@@ -9,11 +10,9 @@ import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
  * Builder for the Kafka Streams configuration.
  */
 public class Uc1KafkaStreamsBuilder extends KafkaStreamsBuilder {
-  private String inputTopic; // NOPMD
 
-  public KafkaStreamsBuilder inputTopic(final String inputTopic) {
-    this.inputTopic = inputTopic;
-    return this;
+  public Uc1KafkaStreamsBuilder(final Configuration config) {
+    super(config);
   }
 
   @Override
diff --git a/uc1-application/src/main/resources/META-INF/application.properties b/benchmarks/uc1-application/src/main/resources/META-INF/application.properties
similarity index 65%
rename from uc1-application/src/main/resources/META-INF/application.properties
rename to benchmarks/uc1-application/src/main/resources/META-INF/application.properties
index 3fb301516daa4c7e14875d3d9ca9df9c770eb69e..b46e6246e248cc524c5b6249348c76ded6ec468b 100644
--- a/uc1-application/src/main/resources/META-INF/application.properties
+++ b/benchmarks/uc1-application/src/main/resources/META-INF/application.properties
@@ -3,10 +3,6 @@ application.version=0.0.1
 
 kafka.bootstrap.servers=localhost:9092
 kafka.input.topic=input
-kafka.output.topic=output
 
 schema.registry.url=http://localhost:8091
 
-num.threads=1
-commit.interval.ms=100
-cache.max.bytes.buffering=-1
diff --git a/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc1-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc1-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc1-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc1-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc1-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc1-workload-generator/Dockerfile b/benchmarks/uc1-workload-generator/Dockerfile
similarity index 100%
rename from uc1-workload-generator/Dockerfile
rename to benchmarks/uc1-workload-generator/Dockerfile
diff --git a/uc1-workload-generator/build.gradle b/benchmarks/uc1-workload-generator/build.gradle
similarity index 100%
rename from uc1-workload-generator/build.gradle
rename to benchmarks/uc1-workload-generator/build.gradle
diff --git a/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java b/benchmarks/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
rename to benchmarks/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
diff --git a/uc2-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc2-application/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc2-application/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc2-application/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc2-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc2-application/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc2-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc2-application/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc2-application/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc2-application/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc2-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc2-application/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc2-application/Dockerfile b/benchmarks/uc2-application/Dockerfile
similarity index 100%
rename from uc2-application/Dockerfile
rename to benchmarks/uc2-application/Dockerfile
diff --git a/uc2-application/README.md b/benchmarks/uc2-application/README.md
similarity index 100%
rename from uc2-application/README.md
rename to benchmarks/uc2-application/README.md
diff --git a/uc2-application/build.gradle b/benchmarks/uc2-application/build.gradle
similarity index 100%
rename from uc2-application/build.gradle
rename to benchmarks/uc2-application/build.gradle
diff --git a/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
similarity index 67%
rename from uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
index c094adfcd7952e81115dae84ed9c0d371e380c98..2f828278f5a3033c3e479bf82f3c8c5d9d4c380c 100644
--- a/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
+++ b/benchmarks/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
@@ -36,26 +36,15 @@ public class AggregationService {
    * @param clusterSession the database session which the application should use.
    */
   private void createKafkaStreamsApplication() {
-    // Use case specific stream configuration
-    final Uc2KafkaStreamsBuilder uc2KafkaStreamsBuilder = new Uc2KafkaStreamsBuilder();
+    final Uc2KafkaStreamsBuilder uc2KafkaStreamsBuilder = new Uc2KafkaStreamsBuilder(this.config);
     uc2KafkaStreamsBuilder
-        .inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC))
         .feedbackTopic(this.config.getString(ConfigurationKeys.KAFKA_FEEDBACK_TOPIC))
         .outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC))
         .configurationTopic(this.config.getString(ConfigurationKeys.KAFKA_CONFIGURATION_TOPIC))
         .emitPeriod(Duration.ofMillis(this.config.getLong(ConfigurationKeys.EMIT_PERIOD_MS)))
         .gracePeriod(Duration.ofMillis(this.config.getLong(ConfigurationKeys.GRACE_PERIOD_MS)));
 
-    // Configuration of the stream application
-    final KafkaStreams kafkaStreams = uc2KafkaStreamsBuilder
-        .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
-        .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
-        .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
-        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
-        .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
-        .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
-        .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
-        .build();
+    final KafkaStreams kafkaStreams = uc2KafkaStreamsBuilder.build();
 
     this.stopEvent.thenRun(kafkaStreams::close);
     kafkaStreams.start();
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
similarity index 100%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
similarity index 93%
rename from uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
index 16addb8510eec2254d4787edbfbfbe186996fdea..1a606ee3df5e6ac2f43b650afe4a9aed036df9cd 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
+++ b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
@@ -2,6 +2,7 @@ package theodolite.uc2.streamprocessing;
 
 import java.time.Duration;
 import java.util.Objects;
+import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
 import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
@@ -14,16 +15,14 @@ public class Uc2KafkaStreamsBuilder extends KafkaStreamsBuilder { // NOPMD build
   private static final Duration EMIT_PERIOD_DEFAULT = Duration.ofSeconds(1);
   private static final Duration GRACE_PERIOD_DEFAULT = Duration.ZERO;
 
-  private String inputTopic; // NOPMD
   private String feedbackTopic; // NOPMD
   private String outputTopic; // NOPMD
   private String configurationTopic; // NOPMD
   private Duration emitPeriod; // NOPMD
   private Duration gracePeriod; // NOPMD
 
-  public Uc2KafkaStreamsBuilder inputTopic(final String inputTopic) {
-    this.inputTopic = inputTopic;
-    return this;
+  public Uc2KafkaStreamsBuilder(final Configuration config) {
+    super(config);
   }
 
   public Uc2KafkaStreamsBuilder feedbackTopic(final String feedbackTopic) {
diff --git a/uc2-application/src/main/resources/META-INF/application.properties b/benchmarks/uc2-application/src/main/resources/META-INF/application.properties
similarity index 78%
rename from uc2-application/src/main/resources/META-INF/application.properties
rename to benchmarks/uc2-application/src/main/resources/META-INF/application.properties
index 10c47960adb012ba5c572e3833a37d821189eb8e..8f1af5f590eff7f2b12706d61a7c89d9152f7949 100644
--- a/uc2-application/src/main/resources/META-INF/application.properties
+++ b/benchmarks/uc2-application/src/main/resources/META-INF/application.properties
@@ -10,8 +10,4 @@ kafka.output.topic=output
 schema.registry.url=http://localhost:8091
 
 emit.period.ms=5000
-grace.period.ms=0
-
-num.threads=1
-commit.interval.ms=100
-cache.max.bytes.buffering=-1
+grace.period.ms=0
\ No newline at end of file
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java b/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java
similarity index 100%
rename from uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java
rename to benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java
diff --git a/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc2-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc2-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc2-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc2-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc2-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc2-workload-generator/Dockerfile b/benchmarks/uc2-workload-generator/Dockerfile
similarity index 100%
rename from uc2-workload-generator/Dockerfile
rename to benchmarks/uc2-workload-generator/Dockerfile
diff --git a/uc2-workload-generator/build.gradle b/benchmarks/uc2-workload-generator/build.gradle
similarity index 100%
rename from uc2-workload-generator/build.gradle
rename to benchmarks/uc2-workload-generator/build.gradle
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java b/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
similarity index 100%
rename from uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
rename to benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java b/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
rename to benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java b/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java
similarity index 100%
rename from uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java
rename to benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java
diff --git a/uc2-workload-generator/src/main/resources/META-INF/application.properties b/benchmarks/uc2-workload-generator/src/main/resources/META-INF/application.properties
similarity index 100%
rename from uc2-workload-generator/src/main/resources/META-INF/application.properties
rename to benchmarks/uc2-workload-generator/src/main/resources/META-INF/application.properties
diff --git a/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java b/benchmarks/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java
similarity index 100%
rename from uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java
rename to benchmarks/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java
diff --git a/uc3-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc3-application/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc3-application/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc3-application/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc3-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc3-application/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc3-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc3-application/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc3-application/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc3-application/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc3-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc3-application/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc3-application/Dockerfile b/benchmarks/uc3-application/Dockerfile
similarity index 100%
rename from uc3-application/Dockerfile
rename to benchmarks/uc3-application/Dockerfile
diff --git a/uc3-application/build.gradle b/benchmarks/uc3-application/build.gradle
similarity index 100%
rename from uc3-application/build.gradle
rename to benchmarks/uc3-application/build.gradle
diff --git a/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
similarity index 63%
rename from uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
index b245b1645c9e5ee68df3f108802c9b91d70cf017..349512f988bb182d8851e458a1bce244c756bbfe 100644
--- a/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
+++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
@@ -34,23 +34,13 @@ public class HistoryService {
    *
    */
   private void createKafkaStreamsApplication() {
-    // Use case specific stream configuration
-    final Uc3KafkaStreamsBuilder uc3KafkaStreamsBuilder = new Uc3KafkaStreamsBuilder();
+    final Uc3KafkaStreamsBuilder uc3KafkaStreamsBuilder = new Uc3KafkaStreamsBuilder(this.config);
     uc3KafkaStreamsBuilder
-        .inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC))
         .outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC))
         .windowDuration(Duration.ofMinutes(this.windowDurationMinutes));
 
-    // Configuration of the stream application
-    final KafkaStreams kafkaStreams = uc3KafkaStreamsBuilder
-        .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
-        .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
-        .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
-        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
-        .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
-        .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
-        .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
-        .build();
+    final KafkaStreams kafkaStreams = uc3KafkaStreamsBuilder.build();
+
     this.stopEvent.thenRun(kafkaStreams::close);
     kafkaStreams.start();
   }
diff --git a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
similarity index 100%
rename from uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
diff --git a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
similarity index 88%
rename from uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
index e74adf7c87673cc0e6ea4004dbcb1c0a6fc907ac..9ab4ea0a96c663af09008bd5358066ca3f8520ac 100644
--- a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
+++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
@@ -2,6 +2,7 @@ package theodolite.uc3.streamprocessing;
 
 import java.time.Duration;
 import java.util.Objects;
+import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
 import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
@@ -11,13 +12,11 @@ import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
  */
 public class Uc3KafkaStreamsBuilder extends KafkaStreamsBuilder {
 
-  private String inputTopic; // NOPMD
   private String outputTopic; // NOPMD
   private Duration windowDuration; // NOPMD
 
-  public Uc3KafkaStreamsBuilder inputTopic(final String inputTopic) {
-    this.inputTopic = inputTopic;
-    return this;
+  public Uc3KafkaStreamsBuilder(final Configuration config) {
+    super(config);
   }
 
   public Uc3KafkaStreamsBuilder outputTopic(final String outputTopic) {
diff --git a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java
similarity index 100%
rename from uc3-application/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java
rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/util/StatsFactory.java
diff --git a/uc3-application/src/main/resources/META-INF/application.properties b/benchmarks/uc3-application/src/main/resources/META-INF/application.properties
similarity index 77%
rename from uc3-application/src/main/resources/META-INF/application.properties
rename to benchmarks/uc3-application/src/main/resources/META-INF/application.properties
index 2ceaf37224b0bff54b09beaabe29210216e11671..011406f7ef1e23647eeae150d349f472214cbcd4 100644
--- a/uc3-application/src/main/resources/META-INF/application.properties
+++ b/benchmarks/uc3-application/src/main/resources/META-INF/application.properties
@@ -7,7 +7,3 @@ kafka.output.topic=output
 kafka.window.duration.minutes=1
 
 schema.registry.url=http://localhost:8091
-
-num.threads=1
-commit.interval.ms=100
-cache.max.bytes.buffering=-1
diff --git a/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc3-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc3-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc3-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc3-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc3-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc3-workload-generator/Dockerfile b/benchmarks/uc3-workload-generator/Dockerfile
similarity index 100%
rename from uc3-workload-generator/Dockerfile
rename to benchmarks/uc3-workload-generator/Dockerfile
diff --git a/uc3-workload-generator/build.gradle b/benchmarks/uc3-workload-generator/build.gradle
similarity index 100%
rename from uc3-workload-generator/build.gradle
rename to benchmarks/uc3-workload-generator/build.gradle
diff --git a/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java b/benchmarks/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
rename to benchmarks/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
diff --git a/uc4-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc4-application/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc4-application/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc4-application/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc4-application/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc4-application/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc4-application/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc4-application/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc4-application/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc4-application/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc4-application/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc4-application/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc4-application/Dockerfile b/benchmarks/uc4-application/Dockerfile
similarity index 100%
rename from uc4-application/Dockerfile
rename to benchmarks/uc4-application/Dockerfile
diff --git a/uc4-application/build.gradle b/benchmarks/uc4-application/build.gradle
similarity index 100%
rename from uc4-application/build.gradle
rename to benchmarks/uc4-application/build.gradle
diff --git a/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
similarity index 67%
rename from uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
index 23af805733de2bb3f6384fa924a2322490ee58d9..12f35e8dcc532b19e470722094ba5aff07420ad2 100644
--- a/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
+++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
@@ -32,9 +32,8 @@ public class HistoryService {
    */
   private void createKafkaStreamsApplication() {
     // Use case specific stream configuration
-    final Uc4KafkaStreamsBuilder uc4KafkaStreamsBuilder = new Uc4KafkaStreamsBuilder();
+    final Uc4KafkaStreamsBuilder uc4KafkaStreamsBuilder = new Uc4KafkaStreamsBuilder(this.config);
     uc4KafkaStreamsBuilder
-        .inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC))
         .outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC))
         .aggregtionDuration(
             Duration.ofDays(this.config.getInt(ConfigurationKeys.AGGREGATION_DURATION_DAYS)))
@@ -42,15 +41,7 @@ public class HistoryService {
             Duration.ofDays(this.config.getInt(ConfigurationKeys.AGGREGATION_ADVANCE_DAYS)));
 
     // Configuration of the stream application
-    final KafkaStreams kafkaStreams = uc4KafkaStreamsBuilder
-        .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
-        .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
-        .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
-        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
-        .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
-        .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
-        .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
-        .build();
+    final KafkaStreams kafkaStreams = uc4KafkaStreamsBuilder.build();
 
     this.stopEvent.thenRun(kafkaStreams::close);
     kafkaStreams.start();
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
similarity index 91%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
index 7c9e2c4f790cf1fbb7dd34db573576d1e64077db..bbbb043119857612b1a8b0c60e3a5466cd68447e 100644
--- a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
+++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
@@ -2,6 +2,7 @@ package theodolite.uc4.streamprocessing;
 
 import java.time.Duration;
 import java.util.Objects;
+import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
 import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
@@ -11,14 +12,12 @@ import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
  */
 public class Uc4KafkaStreamsBuilder extends KafkaStreamsBuilder {
 
-  private String inputTopic; // NOPMD
   private String outputTopic; // NOPMD
   private Duration aggregtionDuration; // NOPMD
   private Duration aggregationAdvance; // NOPMD
 
-  public Uc4KafkaStreamsBuilder inputTopic(final String inputTopic) {
-    this.inputTopic = inputTopic;
-    return this;
+  public Uc4KafkaStreamsBuilder(final Configuration config) {
+    super(config);
   }
 
   public Uc4KafkaStreamsBuilder outputTopic(final String outputTopic) {
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java
similarity index 100%
rename from uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java
rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java
diff --git a/uc4-application/src/main/resources/META-INF/application.properties b/benchmarks/uc4-application/src/main/resources/META-INF/application.properties
similarity index 79%
rename from uc4-application/src/main/resources/META-INF/application.properties
rename to benchmarks/uc4-application/src/main/resources/META-INF/application.properties
index e577c880a8ff8169699acb8598e323b8671e8d5e..b46681533e63bf86a51439778a46940da348559d 100644
--- a/uc4-application/src/main/resources/META-INF/application.properties
+++ b/benchmarks/uc4-application/src/main/resources/META-INF/application.properties
@@ -8,7 +8,3 @@ aggregation.duration.days=30
 aggregation.advance.days=1
 
 schema.registry.url=http://localhost:8091
-
-num.threads=1
-commit.interval.ms=100
-cache.max.bytes.buffering=-1
diff --git a/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs
diff --git a/uc4-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from uc4-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/uc4-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from uc4-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/uc4-workload-generator/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/uc4-workload-generator/Dockerfile b/benchmarks/uc4-workload-generator/Dockerfile
similarity index 100%
rename from uc4-workload-generator/Dockerfile
rename to benchmarks/uc4-workload-generator/Dockerfile
diff --git a/uc4-workload-generator/build.gradle b/benchmarks/uc4-workload-generator/build.gradle
similarity index 100%
rename from uc4-workload-generator/build.gradle
rename to benchmarks/uc4-workload-generator/build.gradle
diff --git a/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java b/benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
similarity index 100%
rename from uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
rename to benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
diff --git a/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
similarity index 100%
rename from workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
rename to benchmarks/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
diff --git a/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs b/benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
similarity index 100%
rename from workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
rename to benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
diff --git a/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs b/benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
similarity index 100%
rename from workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
rename to benchmarks/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
diff --git a/workload-generator-commons/build.gradle b/benchmarks/workload-generator-commons/build.gradle
similarity index 100%
rename from workload-generator-commons/build.gradle
rename to benchmarks/workload-generator-commons/build.gradle
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java
similarity index 100%
rename from workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java
rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java
diff --git a/docker-test/uc1-docker-compose/docker-compose.yml b/docker-test/uc1-docker-compose/docker-compose.yml
index d394255951151d931b73e4c923bb10ecaed66a2c..905e6e30bfd38900e896be45d8a4b15389b2f54f 100755
--- a/docker-test/uc1-docker-compose/docker-compose.yml
+++ b/docker-test/uc1-docker-compose/docker-compose.yml
@@ -1,26 +1,53 @@
 version: '2'
 services:
   zookeeper:
-    image: wurstmeister/zookeeper
-    ports:
-      - "2181:2181"
+    image: confluentinc/cp-zookeeper
+    expose:
+      - "2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
     image: wurstmeister/kafka
-    ports:
-      - "9092:9092"
     expose:
       - "9092"
+    #ports:
+    #  - 19092:19092
     environment:
-      KAFKA_ADVERTISED_HOST_NAME: kafka #172.17.0.1 # Replace with docker network
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
-      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
+  schema-registry:
+    image: confluentinc/cp-schema-registry:5.3.1
+    depends_on:
+      - zookeeper
+      - kafka
+    expose:
+      - "8081"
+    ports:
+      - 8081:8081
+    environment:
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry
+      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
   uc-app:
     image: theodolite/theodolite-uc1-kstreams-app:latest
+    depends_on:
+      - schema-registry
+      - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
   uc-wg: 
     image: theodolite/theodolite-uc1-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+      - zookeeper
     environment:
+      ZK_HOST: zookeeper
+      ZK_PORT: 2181
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      INSTANCES: 1
       NUM_SENSORS: 1
diff --git a/docker-test/uc2-docker-compose/docker-compose.yml b/docker-test/uc2-docker-compose/docker-compose.yml
index f730148a89d41a819d81a4770e0d53a960dbe493..e6511bfd9fa7ea1e62bf9f3787ac6f3c0acc0107 100755
--- a/docker-test/uc2-docker-compose/docker-compose.yml
+++ b/docker-test/uc2-docker-compose/docker-compose.yml
@@ -1,26 +1,53 @@
 version: '2'
 services:
   zookeeper:
-    image: wurstmeister/zookeeper
-    ports:
-      - "2181:2181"
+    image: confluentinc/cp-zookeeper
+    expose:
+      - "2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
     image: wurstmeister/kafka
-    ports:
-      - "9092:9092"
     expose:
       - "9092"
+    ports:
+      - 19092:19092
     environment:
-      KAFKA_ADVERTISED_HOST_NAME: kafka #172.17.0.1 # Replace with docker network
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
-      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
+  schema-registry:
+    image: confluentinc/cp-schema-registry:5.3.1
+    depends_on:
+      - zookeeper
+      - kafka
+    expose:
+      - "8081"
+    ports:
+      - 8081:8081
+    environment:
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry
+      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
   uc-app:
     image: theodolite/theodolite-uc2-kstreams-app:latest
+    depends_on:
+      - schema-registry
+      - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
   uc-wg: 
     image: theodolite/theodolite-uc2-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+      - zookeeper
     environment:
+      ZK_HOST: zookeeper
+      ZK_PORT: 2181
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      INSTANCES: 1
       NUM_SENSORS: 1
\ No newline at end of file
diff --git a/docker-test/uc3-docker-compose/docker-compose.yml b/docker-test/uc3-docker-compose/docker-compose.yml
index 2a3cb23a79f9edda699fe1bb07c1b922614aeb13..9d2da8e87621c1902ff101efd42ff52436416b77 100755
--- a/docker-test/uc3-docker-compose/docker-compose.yml
+++ b/docker-test/uc3-docker-compose/docker-compose.yml
@@ -1,27 +1,58 @@
 version: '2'
 services:
   zookeeper:
-    image: wurstmeister/zookeeper
+    #image: wurstmeister/zookeeper
+    image: confluentinc/cp-zookeeper
     ports:
       - "2181:2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
     image: wurstmeister/kafka
     ports:
       - "9092:9092"
     expose:
       - "9092"
+    #ports:
+    #  - 19092:19092
     environment:
-      KAFKA_ADVERTISED_HOST_NAME: kafka #172.17.0.1 # Replace with docker network
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
-      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
+  schema-registry:
+    image: confluentinc/cp-schema-registry:5.3.1
+    depends_on:
+      - zookeeper
+      - kafka
+    ports:
+      - "8081:8081"
+    expose:
+      - "8081"
+    environment:
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry
+      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
   uc-app:
     image: theodolite/theodolite-uc3-kstreams-app:latest
+    depends_on:
+      - schema-registry
+      - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
       KAFKA_WINDOW_DURATION_MINUTES: 60
   uc-wg: 
     image: theodolite/theodolite-uc3-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+      - zookeeper
     environment:
+      ZK_HOST: zookeeper
+      ZK_PORT: 2181
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      INSTANCES: 1
       NUM_SENSORS: 1
\ No newline at end of file
diff --git a/docker-test/uc4-docker-compose/docker-compose.yml b/docker-test/uc4-docker-compose/docker-compose.yml
index 1f015f23b2e8b98eba27ae6f387adb123ae2ccc2..530852b2df5ef2c70f03a11ac2445ce587a3760f 100755
--- a/docker-test/uc4-docker-compose/docker-compose.yml
+++ b/docker-test/uc4-docker-compose/docker-compose.yml
@@ -1,25 +1,32 @@
 version: '2'
 services:
   zookeeper:
-    image: wurstmeister/zookeeper
+    #image: wurstmeister/zookeeper
+    image: confluentinc/cp-zookeeper
     ports:
       - "2181:2181"
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
   kafka:
     image: wurstmeister/kafka
-    ports:
-      - "9092:9092"
     expose:
       - "9092"
+    ports:
+      - 19092:19092
     environment:
-      KAFKA_ADVERTISED_HOST_NAME: kafka #172.17.0.1 # Replace with docker network
+      KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,CONNECTIONS_FROM_HOST:PLAINTEXT
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
-      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1"
   schema-registry:
     image: confluentinc/cp-schema-registry:5.3.1
     depends_on:
       - zookeeper
       - kafka
+    ports:
+      - "8081:8081"
     expose:
       - "8081"
     environment:
@@ -27,10 +34,22 @@ services:
       SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
   uc-app:
     image: theodolite/theodolite-uc4-kstreams-app:latest
+    depends_on:
+      - schema-registry
+      - kafka
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       SCHEMA_REGISTRY_URL: http://schema-registry:8081
   uc-wg: 
     image: theodolite/theodolite-uc4-workload-generator:latest
+    depends_on:
+      - schema-registry
+      - kafka
+      - zookeeper
     environment:
+      ZK_HOST: zookeeper
+      ZK_PORT: 2181
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      SCHEMA_REGISTRY_URL: http://schema-registry:8081
+      INSTANCES: 1
+      NUM_SENSORS: 100
diff --git a/execution/.gitignore b/execution/.gitignore
index d4dceff0274cd6ab3296e85e995f7e5d504f114d..bac9a5d1eeb12d9e40d38376904e8fb69c0e5231 100644
--- a/execution/.gitignore
+++ b/execution/.gitignore
@@ -1 +1,2 @@
-exp_counter.txt
\ No newline at end of file
+exp_counter.txt
+results
diff --git a/execution/README.md b/execution/README.md
index 68ab9b244ecf8fe0e75580bc4ce21d9efc3b0639..32c7cee386cfe1ce7f39ffd4539ea37675b83fbf 100644
--- a/execution/README.md
+++ b/execution/README.md
@@ -14,6 +14,27 @@ For executing benchmarks, access to Kubernetes cluster is required. We suggest
 to create a dedicated namespace for executing our benchmarks. The following
 services need to be available as well.
 
+### Kubernetes Volume
+
+For executing the benchmark as a Kubernetes job it is required to use a volume to store the results of the executions.
+In `infrastructure/kubernetes` are two files for creating a volume.
+Either one of them should be used.
+
+The `volumeSingle.yaml` is meant for systems where Kubernetes is run locally (e.g. minikube, kind etc.).
+However, you can also use the other file.
+In `volumeSingle.yaml` you need to set `path` to the path on your machine where the results should be stored.
+
+The `volumeCluster.yaml` should be used when Kubernetes runs in the cloud.
+In the `nodeAffinity` section you need to replace `<node-name>` with the name of the node where the volume should be created (this node will most likely also execute the job).
+However, you can also set a different `nodeAffinity`.
+Furthermore, you need to set `path` to the path on the node where the results should be stored.
+
+After setting the properties you can create the volume with:
+
+```sh
+kubectl apply -f infrastructure/kubernetes/volume(Single|Cluster).yaml
+```
+
 #### Prometheus
 
 We suggest to use the [Prometheus Operator](https://github.com/coreos/prometheus-operator)
@@ -34,7 +55,7 @@ After installation, you need to create a Prometheus instance:
 kubectl apply -f infrastructure/prometheus/prometheus.yaml
 ```
 
-You might also need to apply the [ServiceAccount](infrastructure/prometheus/service-account.yaml), [ClusterRole](infrastructure/prometheus/cluster-role.yaml) 
+You might also need to apply the [ServiceAccount](infrastructure/prometheus/service-account.yaml), [ClusterRole](infrastructure/prometheus/cluster-role.yaml)
 and the [CusterRoleBinding](infrastructure/prometheus/cluster-role-binding.yaml),
 depending on your cluster's security policies.
 
@@ -132,7 +153,7 @@ root directory). As set of requirements is needed. You can install them with the
 command (make sure to be in your virtual environment if you use one):
 
 ```sh
-pip install -r requirements.txt 
+pip install -r requirements.txt
 ```
 
 
@@ -183,4 +204,3 @@ There are the following benchmarking strategies:
 * `check-all`: For each dimension value, execute one lag experiment for all amounts of instances within the current domain.
 * `linear-search`: A heuristic which works as follows: For each dimension value, execute one lag experiment for all number of instances within the current domain. The execution order is from the lowest number of instances to the highest amount of instances and the execution for each dimension value is stopped, when a suitable amount of instances is found or if all lag experiments for the dimension value were not successful.
 * `binary-search`: A heuristic which works as follows: For each dimension value, execute one lag experiment for all number of instances within the current domain. The execution order is in a binary-search-like manner. The execution is stopped, when a suitable amount of instances is found or if all lag experiments for the dimension value were not successful.
-
diff --git a/execution/infrastructure/kubernetes/rbac/role-binding.yaml b/execution/infrastructure/kubernetes/rbac/role-binding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ef2d0c015a1b42880f9652bc241950548a952792
--- /dev/null
+++ b/execution/infrastructure/kubernetes/rbac/role-binding.yaml
@@ -0,0 +1,11 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: theodolite
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: theodolite
+subjects:
+- kind: ServiceAccount
+  name: theodolite
\ No newline at end of file
diff --git a/execution/infrastructure/kubernetes/rbac/role.yaml b/execution/infrastructure/kubernetes/rbac/role.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..84ba14a8bc7a6eceb8a20596ede057ca2271b967
--- /dev/null
+++ b/execution/infrastructure/kubernetes/rbac/role.yaml
@@ -0,0 +1,41 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: theodolite
+rules:
+  - apiGroups:
+    - apps
+    resources:
+    - deployments
+    verbs:
+    - delete
+    - list
+    - get
+    - create
+  - apiGroups:
+    - ""
+    resources:
+    - services
+    - pods
+    - servicemonitors
+    - configmaps
+    verbs:
+    - delete
+    - list
+    - get
+    - create
+  - apiGroups:
+    - ""
+    resources:
+    - pods/exec
+    verbs:
+    - create
+    - get
+  - apiGroups:
+    - monitoring.coreos.com
+    resources:
+    - servicemonitors
+    verbs:
+    - delete
+    - list
+    - create
\ No newline at end of file
diff --git a/execution/infrastructure/kubernetes/rbac/service-account.yaml b/execution/infrastructure/kubernetes/rbac/service-account.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c7f33076e31ac53d02491c80fd61cdc5b241dfd7
--- /dev/null
+++ b/execution/infrastructure/kubernetes/rbac/service-account.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: theodolite
\ No newline at end of file
diff --git a/execution/infrastructure/kubernetes/volumeCluster.yaml b/execution/infrastructure/kubernetes/volumeCluster.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..78c1501afedccf03d3f415c928010dbb5d131c70
--- /dev/null
+++ b/execution/infrastructure/kubernetes/volumeCluster.yaml
@@ -0,0 +1,42 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: theodolite-pv-volume
+spec:
+  capacity:
+    storage: 1Gi
+  volumeMode: Filesystem
+  accessModes:
+  - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Delete
+  storageClassName: local-storage
+  local:
+    path: </your/path/to/results/folder>
+  nodeAffinity:
+    required:
+      nodeSelectorTerms:
+      - matchExpressions:
+        - key: kubernetes.io/hostname
+          operator: In
+          values:
+          - <node-name>
+---
+# https://kubernetes.io/docs/concepts/storage/storage-classes/#local
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: local-storage
+provisioner: kubernetes.io/no-provisioner
+volumeBindingMode: WaitForFirstConsumer
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: theodolite-pv-claim
+spec:
+  storageClassName: local-storage
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/execution/infrastructure/kubernetes/volumeSingle.yaml b/execution/infrastructure/kubernetes/volumeSingle.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b0a132dd1922652e52daa0f691a6014a9b8ec1a8
--- /dev/null
+++ b/execution/infrastructure/kubernetes/volumeSingle.yaml
@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: theodolite-pv-volume
+  labels:
+    type: local
+spec:
+  storageClassName: theodolite
+  capacity:
+    storage: 100Mi
+  accessModes:
+    - ReadWriteOnce
+  hostPath:
+    path: </your/path/to/results/folder>
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: theodolite-pv-claim
+spec:
+  storageClassName: theodolite
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 100Mi
diff --git a/execution/lag_analysis.py b/execution/lag_analysis.py
index 3950f12413745a6f3802a2e223123830f7d03649..5b78ef3653753a2b95ac9b74bf8de156a71fb14c 100644
--- a/execution/lag_analysis.py
+++ b/execution/lag_analysis.py
@@ -8,7 +8,7 @@ import csv
 import logging
 
 
-def main(exp_id, benchmark, dim_value, instances, execution_minutes, prometheus_base_url = 'http://kube1.se.internal:32529'):
+def main(exp_id, benchmark, dim_value, instances, execution_minutes, prometheus_base_url, result_path):
     print("Main")
     time_diff_ms = int(os.getenv('CLOCK_DIFF_MS', 0))
 
@@ -70,11 +70,11 @@ def main(exp_id, benchmark, dim_value, instances, execution_minutes, prometheus_
     fields = [exp_id, datetime.now(), benchmark, dim_value,
               instances, linear_regressor.coef_]
     print(fields)
-    with open(r'results.csv', 'a') as f:
+    with open(f'{result_path}/results.csv', 'a') as f:
         writer = csv.writer(f)
         writer.writerow(fields)
 
-    filename = f"exp{exp_id}_{benchmark}_{dim_value}_{instances}"
+    filename = f"{result_path}/exp{exp_id}_{benchmark}_{dim_value}_{instances}"
 
     plt.plot(X, Y)
     plt.plot(X, Y_pred, color='red')
@@ -163,4 +163,5 @@ if __name__ == '__main__':
     instances = sys.argv[4]
     execution_minutes = int(sys.argv[5])
 
-    main(exp_id, benchmark, dim_value, instances, execution_minutes)
+    main(exp_id, benchmark, dim_value, instances, execution_minutes,
+        'http://localhost:9090', 'results')
diff --git a/execution/lib/cli_parser.py b/execution/lib/cli_parser.py
index 0b0a7438910560f5b5871b0023c92d6743dd6cc9..eaebaa6cc99959bc8a41e50f3d6a63acaf5ab817 100644
--- a/execution/lib/cli_parser.py
+++ b/execution/lib/cli_parser.py
@@ -1,6 +1,7 @@
 import argparse
 import os
 
+
 def env_list_default(env, tf):
     """
     Makes a list from an environment string.
@@ -10,6 +11,40 @@ def env_list_default(env, tf):
         v = [tf(s) for s in v.split(',')]
     return v
 
+
+def key_values_to_dict(kvs):
+    """
+    Given a list with key values in form `Key=Value` it creates a dict from it.
+    """
+    my_dict = {}
+    for kv in kvs:
+        k, v = kv.split("=")
+        my_dict[k] = v
+    return my_dict
+
+
+def env_dict_default(env):
+    """
+    Makes a dict from an environment string.
+    """
+    v = os.environ.get(env)
+    if v is not None:
+        return key_values_to_dict(v.split(','))
+    else:
+        return dict()
+
+
+class StoreDictKeyPair(argparse.Action):
+    def __init__(self, option_strings, dest, nargs=None, **kwargs):
+        self._nargs = nargs
+        super(StoreDictKeyPair, self).__init__(
+            option_strings, dest, nargs=nargs, **kwargs)
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        my_dict = key_values_to_dict(values)
+        setattr(namespace, self.dest, my_dict)
+
+
 def default_parser(description):
     """
     Returns the default parser that can be used for thodolite and run uc py
@@ -33,11 +68,6 @@ def default_parser(description):
                         metavar='<memory limit>',
                         default=os.environ.get('MEMORY_LIMIT', '4Gi'),
                         help='Kubernetes memory limit')
-    parser.add_argument('--commit-ms',
-                        metavar='<commit ms>',
-                        type=int,
-                        default=os.environ.get('COMMIT_MS', 100),
-                        help='Kafka Streams commit interval in milliseconds')
     parser.add_argument('--duration', '-d',
                         metavar='<duration>',
                         type=int,
@@ -50,16 +80,33 @@ def default_parser(description):
                         help='Defines the Kubernetes where the applications should run')
     parser.add_argument('--reset',
                         action="store_true",
+                        default=os.environ.get(
+                            'RESET', 'false').lower() == 'true',
                         help='Resets the environment before execution')
     parser.add_argument('--reset-only',
                         action="store_true",
+                        default=os.environ.get(
+                            'RESET_ONLY', 'false').lower() == 'true',
                         help='Only resets the environment. Ignores all other parameters')
     parser.add_argument('--prometheus',
                         metavar='<URL>',
-                        default=os.environ.get('PROMETHEUS_BASE_URL'),
+                        default=os.environ.get(
+                            'PROMETHEUS_BASE_URL', 'http://localhost:9090'),
                         help='Defines where to find the prometheus instance')
+    parser.add_argument('--path',
+                        metavar='<path>',
+                        default=os.environ.get('RESULT_PATH', 'results'),
+                        help='A directory path for the results')
+    parser.add_argument("--configurations",
+                        metavar="KEY=VAL",
+                        dest="configurations",
+                        action=StoreDictKeyPair,
+                        nargs="+",
+                        default=env_dict_default('CONFIGURATIONS'),
+                        help='Defines the environment variables for the UC')
     return parser
 
+
 def benchmark_parser(description):
     """
     Parser for the overall benchmark execution
@@ -82,6 +129,8 @@ def benchmark_parser(description):
                         help='[mandatory] List of instances used in benchmarks')
     parser.add_argument('--domain-restriction',
                         action="store_true",
+                        default=os.environ.get(
+                            'DOMAIN_RESTRICTION', 'false').lower() == 'true',
                         help='To use domain restriction. For details see README')
     parser.add_argument('--search-strategy',
                         metavar='<strategy>',
@@ -89,6 +138,7 @@ def benchmark_parser(description):
                         help='The benchmarking search strategy. Can be set to default, linear-search or binary-search')
     return parser
 
+
 def execution_parser(description):
     """
     Parser for executing one use case
diff --git a/execution/run_uc.py b/execution/run_uc.py
index 0d7ca59ad23fac3c343cf2c6411716d7185cfcb5..9ac9c37259a14eec0a89e45d0831873e276604e5 100644
--- a/execution/run_uc.py
+++ b/execution/run_uc.py
@@ -24,7 +24,7 @@ def load_variables():
     parser = execution_parser(description='Run use case Programm')
     args = parser.parse_args()
     print(args)
-    if args.exp_id is None or args.uc is None or args.load is None or args.instances is None:
+    if (args.exp_id is None or args.uc is None or args.load is None or args.instances is None) and not args.reset_only:
         print('The options --exp-id, --uc, --load and --instances are mandatory.')
         print('Some might not be set!')
         sys.exit(1)
@@ -41,8 +41,8 @@ def initialize_kubernetes_api():
         config.load_kube_config()  # try using local config
     except config.config_exception.ConfigException as e:
         # load config from pod, if local config is not available
-        logging.debug('Failed loading local Kubernetes configuration,'
-                      + ' try from cluster')
+        logging.debug(
+            'Failed loading local Kubernetes configuration try from cluster')
         logging.debug(e)
         config.load_incluster_config()
 
@@ -58,8 +58,7 @@ def create_topics(topics):
     # Calling exec and waiting for response
     print('Create topics')
     for (topic, partitions) in topics:
-        print('Create topic ' + topic + ' with #' + str(partitions)
-              + ' partitions')
+        print(f'Create topic {topic} with #{partitions} partitions')
         exec_command = [
             '/bin/sh',
             '-c',
@@ -86,7 +85,7 @@ def load_yaml(file_path):
         with f:
             return yaml.safe_load(f)
     except Exception as e:
-        logging.error('Error opening file %s' % file_path)
+        logging.error('Error opening file %s', file_path)
         logging.error(e)
 
 
@@ -105,6 +104,15 @@ def load_yaml_files():
     return wg, app_svc, app_svc_monitor, app_jmx, app_deploy
 
 
+def replace_env_value(container, key, value):
+    """
+    Special method to replace in a container with kubernetes env values
+    the value of a given parameter.
+    """
+    next(filter(lambda x: x['name'] == key, container))[
+        'value'] = value
+
+
 def start_workload_generator(wg_yaml, dim_value, uc_id):
     """Starts the workload generator.
     :param wg_yaml: The yaml object for the workload generator.
@@ -118,17 +126,15 @@ def start_workload_generator(wg_yaml, dim_value, uc_id):
 
     num_sensors = dim_value
     wl_max_records = 150000
-    wl_instances = int(((num_sensors + (wl_max_records - 1)) / wl_max_records))
+    wl_instances = (num_sensors + wl_max_records - 1) // wl_max_records
 
     # set parameters special for uc 2
     if uc_id == '2':
         print('use uc2 stuff')
         num_nested_groups = dim_value
-        num_sensors = '4'
-        approx_num_sensors = int(num_sensors) ** num_nested_groups
-        wl_instances = int(
-            ((approx_num_sensors + wl_max_records - 1) / wl_max_records)
-        )
+        num_sensors = 4
+        approx_num_sensors = num_sensors ** num_nested_groups
+        wl_instances = (approx_num_sensors + wl_max_records - 1) // wl_max_records
 
     # Customize workload generator creations
     wg_yaml['spec']['replicas'] = wl_instances
@@ -139,26 +145,28 @@ def start_workload_generator(wg_yaml, dim_value, uc_id):
         '-workload-generator:latest'
     # Set environment variables
 
-    next(filter(lambda x: x['name'] == 'NUM_SENSORS', wg_containter['env']))[
-        'value'] = str(num_sensors)
-    next(filter(lambda x: x['name'] == 'INSTANCES', wg_containter['env']))[
-        'value'] = str(wl_instances)
+    replace_env_value(wg_containter['env'], 'NUM_SENSORS', str(num_sensors))
+    replace_env_value(wg_containter['env'], 'INSTANCES', str(wl_instances))
+
     if uc_id == '2':  # Special configuration for uc2
-        next(filter(lambda x: x['name'] == 'NUM_NESTED_GROUPS', wg_containter['env']))[
-            'value'] = str(num_nested_groups)
+        replace_env_value(
+            wg_containter['env'], 'NUM_NESTED_GROUPS', str(num_nested_groups))
+
     try:
         wg_ss = appsApi.create_namespaced_deployment(
             namespace=namespace,
             body=wg_yaml
         )
-        print("Deployment '%s' created." % wg_ss.metadata.name)
+        print(f'Deployment {wg_ss.metadata.name} created.')
         return wg_ss
     except client.rest.ApiException as e:
-        print("Deployment creation error: %s" % e.reason)
+        print(f'Deployment creation error: {e.reason}')
         return wg_yaml
 
 
-def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml, instances, uc_id, commit_interval_ms, memory_limit, cpu_limit):
+def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml,
+                      instances, uc_id, memory_limit, cpu_limit,
+                      configurations):
     """Applies the service, service monitor, jmx config map and start the
     use case application.
 
@@ -168,9 +176,9 @@ def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml, instanc
     :param deploy_yaml: The yaml object for the application.
     :param int instances: Number of instances for use case application.
     :param string uc_id: The id of the use case to execute.
-    :param int commit_interval_ms: The commit interval in ms.
     :param string memory_limit: The memory limit for the application.
     :param string cpu_limit: The CPU limit for the application.
+    :param dict configurations: A dictionary with ENV variables for configurations.
     :return:
         The Service, ServiceMonitor, JMX ConfigMap and Deployment.
         In case the resource already exist/error the yaml object is returned.
@@ -183,10 +191,10 @@ def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml, instanc
     try:
         svc = coreApi.create_namespaced_service(
             namespace=namespace, body=svc_yaml)
-        print("Service '%s' created." % svc.metadata.name)
+        print(f'Service {svc.metadata.name} created.')
     except client.rest.ApiException as e:
         svc = svc_yaml
-        logging.error("Service creation error: %s" % e.reason)
+        logging.error("Service creation error: %s", e.reason)
 
     # Create custom object service monitor
     try:
@@ -197,39 +205,54 @@ def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml, instanc
             plural="servicemonitors",  # CustomResourceDef of ServiceMonitor
             body=svc_monitor_yaml,
         )
-        print("ServiceMonitor '%s' created." % svc_monitor['metadata']['name'])
+        print(f"ServiceMonitor '{svc_monitor['metadata']['name']}' created.")
     except client.rest.ApiException as e:
         svc_monitor = svc_monitor_yaml
-        logging.error("ServiceMonitor creation error: %s" % e.reason)
+        logging.error("ServiceMonitor creation error: %s", e.reason)
 
     # Apply jmx config map for aggregation service
     try:
         jmx_cm = coreApi.create_namespaced_config_map(
             namespace=namespace, body=jmx_yaml)
-        print("ConfigMap '%s' created." % jmx_cm.metadata.name)
+        print(f"ConfigMap '{jmx_cm.metadata.name}' created.")
     except client.rest.ApiException as e:
         jmx_cm = jmx_yaml
-        logging.error("ConfigMap creation error: %s" % e.reason)
+        logging.error("ConfigMap creation error: %s", e.reason)
 
     # Create deployment
     deploy_yaml['spec']['replicas'] = instances
     app_container = next(filter(
-        lambda x: x['name'] == 'uc-application', deploy_yaml['spec']['template']['spec']['containers']))
+        lambda x: x['name'] == 'uc-application',
+        deploy_yaml['spec']['template']['spec']['containers']))
     app_container['image'] = 'theodolite/theodolite-uc' + uc_id \
         + '-kstreams-app:latest'
-    next(filter(lambda x: x['name'] == 'COMMIT_INTERVAL_MS', app_container['env']))[
-        'value'] = str(commit_interval_ms)
+
+    # Set configurations environment parameters for SPE
+    for k, v in configurations.items():
+        # check if environment variable is already defined in yaml
+        env = next(filter(lambda x: x['name'] == k,
+                          app_container['env']), None)
+        if env is not None:
+            env['value'] = v  # replace value
+        else:
+            # create new environment pair
+            conf = {'name': k, 'value': v}
+            app_container['env'].append(conf)
+
+    # Set resources in Kubernetes
     app_container['resources']['limits']['memory'] = memory_limit
     app_container['resources']['limits']['cpu'] = cpu_limit
+
+    # Deploy application
     try:
         app_deploy = appsApi.create_namespaced_deployment(
             namespace=namespace,
             body=deploy_yaml
         )
-        print("Deployment '%s' created." % app_deploy.metadata.name)
+        print(f"Deployment '{app_deploy.metadata.name}' created.")
     except client.rest.ApiException as e:
         app_deploy = deploy_yaml
-        logging.error("Deployment creation error: %s" % e.reason)
+        logging.error("Deployment creation error: %s", e.reason)
 
     return svc, svc_monitor, jmx_cm, app_deploy
 
@@ -243,12 +266,12 @@ def wait_execution(execution_minutes):
 
     for i in range(execution_minutes):
         time.sleep(60)
-        print(f"Executed: {i+1} minutes")
+        print(f'Executed: {i+1} minutes')
     print('Execution finished')
     return
 
 
-def run_evaluation(exp_id, uc_id, dim_value, instances, execution_minutes, prometheus_base_url=None):
+def run_evaluation(exp_id, uc_id, dim_value, instances, execution_minutes, prometheus_base_url, result_path):
     """
     Runs the evaluation function
     :param string exp_id: ID of the experiment.
@@ -258,12 +281,8 @@ def run_evaluation(exp_id, uc_id, dim_value, instances, execution_minutes, prome
     :param int execution_minutes: How long the use case where executed.
     """
     print('Run evaluation function')
-    if prometheus_base_url is None and environ.get('PROMETHEUS_BASE_URL') is None:
-        lag_analysis.main(exp_id, f'uc{uc_id}', dim_value, instances, execution_minutes)
-    elif prometheus_base_url is not None:
-        lag_analysis.main(exp_id, f'uc{uc_id}', dim_value, instances, execution_minutes, prometheus_base_url)
-    else:
-        lag_analysis.main(exp_id, f'uc{uc_id}', dim_value, instances, execution_minutes, environ.get('PROMETHEUS_BASE_URL'))
+    lag_analysis.main(exp_id, f'uc{uc_id}', dim_value, instances,
+                      execution_minutes, prometheus_base_url, result_path)
     return
 
 
@@ -315,7 +334,7 @@ def stop_applications(wg, app_svc, app_svc_monitor, app_jmx, app_deploy):
             name=app_svc_monitor['metadata']['name'])
         print('Resource deleted')
     except Exception as e:
-        print("Error deleting service monitor")
+        print('Error deleting service monitor')
 
     print('Delete jmx config map')
     delete_resource(app_jmx, coreApi.delete_namespaced_config_map)
@@ -368,7 +387,7 @@ def delete_topics(topics):
                       stderr=True, stdin=False,
                       stdout=True, tty=False)
         if resp == '0':
-            print("Topics deleted")
+            print('Topics deleted')
             break
     return
 
@@ -460,7 +479,7 @@ def reset_cluster(wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics):
     stop_lag_exporter()
 
 
-def main(exp_id, uc_id, dim_value, instances, partitions, cpu_limit, memory_limit, commit_interval_ms, execution_minutes, prometheus_base_url, reset, reset_only, ns):
+def main(exp_id, uc_id, dim_value, instances, partitions, cpu_limit, memory_limit, execution_minutes, prometheus_base_url, reset, ns, result_path, configurations, reset_only=False):
     """
     Main method to execute one time the benchmark for a given use case.
     Start workload generator/application -> execute -> analyse -> stop all
@@ -471,9 +490,9 @@ def main(exp_id, uc_id, dim_value, instances, partitions, cpu_limit, memory_limi
     :param int partitions: Number of partitions the kafka topics should have.
     :param string cpu_limit: Max CPU utilazation for application.
     :param string memory_limit: Max memory utilazation for application.
-    :param int commit_interval_ms: Kafka Streams commit interval in milliseconds
     :param int execution_minutes: How long to execute the benchmark.
     :param boolean reset: Flag for reset of cluster before execution.
+    :param dict configurations: Key value pairs for setting env variables of UC.
     :param boolean reset_only: Flag to only reset the application.
     """
     global namespace
@@ -519,16 +538,16 @@ def main(exp_id, uc_id, dim_value, instances, partitions, cpu_limit, memory_limi
         app_deploy,
         instances,
         uc_id,
-        commit_interval_ms,
         memory_limit,
-        cpu_limit)
+        cpu_limit,
+        configurations)
     print('---------------------')
 
     wait_execution(execution_minutes)
     print('---------------------')
 
     run_evaluation(exp_id, uc_id, dim_value, instances,
-                   execution_minutes, prometheus_base_url)
+                   execution_minutes, prometheus_base_url, result_path)
     print('---------------------')
 
     # Reset cluster regular, therefore abort exit not needed anymore
@@ -540,7 +559,7 @@ if __name__ == '__main__':
     logging.basicConfig(level=logging.INFO)
     args = load_variables()
     print('---------------------')
-    main(args.exp_id, args.uc, args.load, args.instances,
-         args.partitions, args.cpu_limit, args.memory_limit,
-         args.commit_ms, args.duration, args.prometheus, args.reset,
-         args.reset_only, args.namespace)
+    main(args.exp_id, args.uc, args.load, args.instances, args.partitions,
+         args.cpu_limit, args.memory_limit, args.duration, args.prometheus,
+         args.reset, args.namespace, args.path, args.configurations,
+         args.reset_only)
diff --git a/execution/strategies/config.py b/execution/strategies/config.py
index f9a67897286f79b06e5af06d9fb9b067228be33c..c3cd1ff82c4926f5efcc741b027996dbc800916b 100644
--- a/execution/strategies/config.py
+++ b/execution/strategies/config.py
@@ -10,9 +10,13 @@ class ExperimentConfig:
     partitions: int
     cpu_limit: str
     memory_limit: str
-    kafka_streams_commit_interval_ms: int
     execution_minutes: int
+    prometheus_base_url: str
+    reset: bool
+    namespace: str
+    result_path: str
+    configurations: dict
     domain_restriction_strategy: object
     search_strategy: object
     subexperiment_executor: object
-    subexperiment_evaluator: object
\ No newline at end of file
+    subexperiment_evaluator: object
diff --git a/execution/strategies/strategies/config.py b/execution/strategies/strategies/config.py
index 9d92831cd6ba03ad5b4ceeaf1b9741937396a4c2..5c31f8c97a4085931cdfa1fa017d4e5909e21915 100644
--- a/execution/strategies/strategies/config.py
+++ b/execution/strategies/strategies/config.py
@@ -11,5 +11,9 @@ class SubexperimentConfig:
     partitions: int
     cpu_limit: str
     memory_limit: str
-    kafka_streams_commit_interval_ms: int
-    execution_minutes: int
\ No newline at end of file
+    execution_minutes: int
+    prometheus_base_url: str
+    reset: bool
+    namespace: str
+    result_path: str
+    configurations: dict
diff --git a/execution/strategies/strategies/search/binary_search_strategy.py b/execution/strategies/strategies/search/binary_search_strategy.py
index 92eea2e7df4805b82b1b04ded909d68caa8c8b39..8856ead0502279f8f8642da87cf56f794cb1b11c 100644
--- a/execution/strategies/strategies/search/binary_search_strategy.py
+++ b/execution/strategies/strategies/search/binary_search_strategy.py
@@ -5,7 +5,7 @@ from strategies.strategies.config import SubexperimentConfig
 def binary_search(config, dim_value, lower, upper, subexperiment_counter):
     if lower == upper:
         print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[lower]}")
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[lower], config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes)
+        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[lower], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
         config.subexperiment_executor.execute(subexperiment_config)
         result = config.subexperiment_evaluator.execute(subexperiment_config)
         if result==1: # successful, the upper neighbor is assumed to also has been successful
@@ -14,14 +14,14 @@ def binary_search(config, dim_value, lower, upper, subexperiment_counter):
             return (lower+1, subexperiment_counter)
     elif lower+1==upper:
         print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[lower]}")
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[lower], config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes)
+        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[lower], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
         config.subexperiment_executor.execute(subexperiment_config)
         result = config.subexperiment_evaluator.execute(subexperiment_config)
         if result==1: # minimal instances found
             return (lower, subexperiment_counter)
         else: # not successful, check if lower+1 instances are sufficient
             print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[upper]}")
-            subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[upper], config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes)
+            subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[upper], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
             config.subexperiment_executor.execute(subexperiment_config)
             result = config.subexperiment_evaluator.execute(subexperiment_config)
             if result == 1: # minimal instances found
@@ -32,7 +32,7 @@ def binary_search(config, dim_value, lower, upper, subexperiment_counter):
         # test mid
         mid=(upper+lower)//2
         print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[mid]}")
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[mid], config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes)
+        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[mid], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
         config.subexperiment_executor.execute(subexperiment_config)
         result = config.subexperiment_evaluator.execute(subexperiment_config)
         if result == 1: # success -> search in (lower, mid-1)
@@ -44,4 +44,3 @@ def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_c
     upper = len(config.replicass)-1
     dim_value=config.dim_values[dim_value_index]
     return binary_search(config, dim_value, lower_replicas_bound_index, upper, subexperiment_counter)
-
diff --git a/execution/strategies/strategies/search/check_all_strategy.py b/execution/strategies/strategies/search/check_all_strategy.py
index cd1a548d2142951a38ab04eba04ec6b0fb32e2a6..8e9d6c3ca0924d724c4f55032ebc24a92bc3ad93 100644
--- a/execution/strategies/strategies/search/check_all_strategy.py
+++ b/execution/strategies/strategies/search/check_all_strategy.py
@@ -12,7 +12,7 @@ def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_c
         replicas=config.replicass[lower_replicas_bound_index]
         print(f"Run subexperiment {subexperiment_counter} of {subexperiments_total} with dimension value {dim_value} and {replicas} replicas.")
 
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, replicas, config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes)
+        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, replicas, config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
 
         config.subexperiment_executor.execute(subexperiment_config)
 
diff --git a/execution/strategies/strategies/search/linear_search_strategy.py b/execution/strategies/strategies/search/linear_search_strategy.py
index eeda5ad32b22174ed3552180ee6307911e18b657..f2436658eec0bd4160259a09c272def40fbc130c 100644
--- a/execution/strategies/strategies/search/linear_search_strategy.py
+++ b/execution/strategies/strategies/search/linear_search_strategy.py
@@ -11,7 +11,7 @@ def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_c
         replicas=config.replicass[lower_replicas_bound_index]
         print(f"Run subexperiment {subexperiment_counter} from at most {subexperiments_total} with dimension value {dim_value} and {replicas} replicas.")
 
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, replicas, config.partitions, config.cpu_limit, config.memory_limit, config.kafka_streams_commit_interval_ms, config.execution_minutes)
+        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, replicas, config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
 
         config.subexperiment_executor.execute(subexperiment_config)
         result = config.subexperiment_evaluator.execute(subexperiment_config)
@@ -19,4 +19,4 @@ def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_c
             return (lower_replicas_bound_index, subexperiment_counter)
         else:
             lower_replicas_bound_index+=1
-    return (lower_replicas_bound_index, subexperiment_counter)
\ No newline at end of file
+    return (lower_replicas_bound_index, subexperiment_counter)
diff --git a/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py b/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
index e7f0c91564057ac2bbdc64493e750c4c967476dd..4e46d2d6ccabb601d9df373a540d23e73d60be28 100644
--- a/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
+++ b/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
@@ -4,10 +4,10 @@ import os
 import lib.trend_slope_computer as trend_slope_computer
 
 THRESHOLD = 2000
-WARMUP_SEC = 60 
+WARMUP_SEC = 60
 
 def execute(config):
-    cwd = os.getcwd()
+    cwd = f'{os.getcwd()}/{config.result_path}'
     file = f"exp{config.exp_id}_uc{config.use_case}_{config.dim_value}_{config.replicas}_totallag.csv"
 
     trend_slope = trend_slope_computer.compute(cwd, file, WARMUP_SEC, THRESHOLD)
diff --git a/execution/strategies/subexperiment_execution/subexperiment_executor.py b/execution/strategies/subexperiment_execution/subexperiment_executor.py
index 7bcf182f5bee1fcc99d2a8c0040df208ae77bdb3..6931dacfc72081cbe112c4d6d1003703ba42c526 100644
--- a/execution/strategies/subexperiment_execution/subexperiment_executor.py
+++ b/execution/strategies/subexperiment_execution/subexperiment_executor.py
@@ -12,9 +12,9 @@ def execute(subexperiment_config):
         partitions=subexperiment_config.partitions,
         cpu_limit=subexperiment_config.cpu_limit,
         memory_limit=subexperiment_config.memory_limit,
-        commit_interval_ms=subexperiment_config.kafka_streams_commit_interval_ms,
         execution_minutes=int(subexperiment_config.execution_minutes),
-        prometheus_base_url=None,
-        reset=False,
-        reset_only=False,
-        ns="default")
+        prometheus_base_url=subexperiment_config.prometheus_base_url,
+        reset=subexperiment_config.reset,
+        ns=subexperiment_config.namespace,
+        result_path=subexperiment_config.result_path,
+        configurations=subexperiment_config.configurations)
diff --git a/execution/theodolite.py b/execution/theodolite.py
index 3c0506355aff373795adb762e0b66ec64456c5df..ae4680391f1fa027256b48087d68ba3808e60253 100755
--- a/execution/theodolite.py
+++ b/execution/theodolite.py
@@ -4,6 +4,7 @@ import argparse
 from lib.cli_parser import benchmark_parser
 import logging  # logging
 import os
+import run_uc
 import sys
 from strategies.config import ExperimentConfig
 import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
@@ -22,7 +23,7 @@ def load_variables():
     parser = benchmark_parser("Run theodolite benchmarking")
     args = parser.parse_args()
     print(args)
-    if args.uc is None or args.loads is None or args.instances_list is None:
+    if (args.uc is None or args.loads is None or args.instances_list is None) and not args.reset_only:
         print('The options --uc, --loads and --instances are mandatory.')
         print('Some might not be set!')
         sys.exit(1)
@@ -30,142 +31,84 @@ def load_variables():
 
 
 def main(uc, loads, instances_list, partitions, cpu_limit, memory_limit,
-         commit_ms, duration, domain_restriction, search_strategy,
-         prometheus_base_url ,reset, reset_only, namespace):
+         duration, domain_restriction, search_strategy, prometheus_base_url,
+         reset, namespace, result_path, configurations):
 
-    print(f"Domain restriction of search space activated: {domain_restriction}")
+    print(
+        f"Domain restriction of search space activated: {domain_restriction}")
     print(f"Chosen search strategy: {search_strategy}")
 
-    if os.path.exists("exp_counter.txt"):
-        with open("exp_counter.txt", mode="r") as read_stream:
+    counter_path = f"{result_path}/exp_counter.txt"
+
+    if os.path.exists(counter_path):
+        with open(counter_path, mode="r") as read_stream:
             exp_id = int(read_stream.read())
     else:
         exp_id = 0
+        # Create the directory if it does not exist
+        os.makedirs(result_path, exist_ok=True)
 
     # Store metadata
     separator = ","
     lines = [
-            f"UC={uc}\n",
-            f"DIM_VALUES={separator.join(map(str, loads))}\n",
-            f"REPLICAS={separator.join(map(str, instances_list))}\n",
-            f"PARTITIONS={partitions}\n",
-            f"CPU_LIMIT={cpu_limit}\n",
-            f"MEMORY_LIMIT={memory_limit}\n",
-            f"KAFKA_STREAMS_COMMIT_INTERVAL_MS={commit_ms}\n",
-            f"EXECUTION_MINUTES={duration}\n",
-            f"DOMAIN_RESTRICTION={domain_restriction}\n",
-            f"SEARCH_STRATEGY={search_strategy}"
-            ]
-    with open(f"exp{exp_id}_uc{uc}_meta.txt", "w") as stream:
+        f'UC={uc}\n',
+        f'DIM_VALUES={separator.join(map(str, loads))}\n',
+        f'REPLICAS={separator.join(map(str, instances_list))}\n',
+        f'PARTITIONS={partitions}\n',
+        f'CPU_LIMIT={cpu_limit}\n',
+        f'MEMORY_LIMIT={memory_limit}\n',
+        f'EXECUTION_MINUTES={duration}\n',
+        f'DOMAIN_RESTRICTION={domain_restriction}\n',
+        f'SEARCH_STRATEGY={search_strategy}\n',
+        f'CONFIGURATIONS={configurations}'
+    ]
+    with open(f"{result_path}/exp{exp_id}_uc{uc}_meta.txt", "w") as stream:
         stream.writelines(lines)
 
-    with open("exp_counter.txt", mode="w") as write_stream:
+    with open(counter_path, mode="w") as write_stream:
         write_stream.write(str(exp_id + 1))
 
-    # domain restriction
+    domain_restriction_strategy = None
+    search_strategy = None
+
+    # Select domain restriction
     if domain_restriction:
-        # domain restriction + linear-search
-        if search_strategy == "linear-search":
-            print(f"Going to execute at most {len(loads)+len(instances_list)-1} subexperiments in total..")
-            experiment_config = ExperimentConfig(
-                use_case=uc,
-                exp_id=exp_id,
-                dim_values=loads,
-                replicass=instances_list,
-                partitions=partitions,
-                cpu_limit=cpu_limit,
-                memory_limit=memory_limit,
-                kafka_streams_commit_interval_ms=commit_ms,
-                execution_minutes=duration,
-                domain_restriction_strategy=lower_bound_strategy,
-                search_strategy=linear_search_strategy,
-                subexperiment_executor=subexperiment_executor,
-                subexperiment_evaluator=subexperiment_evaluator)
-        # domain restriction + binary-search
-        elif search_strategy == "binary-search":
-            experiment_config = ExperimentConfig(
-                use_case=uc,
-                exp_id=exp_id,
-                dim_values=loads,
-                replicass=instances_list,
-                partitions=partitions,
-                cpu_limit=cpu_limit,
-                memory_limit=memory_limit,
-                kafka_streams_commit_interval_ms=commit_ms,
-                execution_minutes=duration,
-                domain_restriction_strategy=lower_bound_strategy,
-                search_strategy=binary_search_strategy,
-                subexperiment_executor=subexperiment_executor,
-                subexperiment_evaluator=subexperiment_evaluator)
-        # domain restriction + check_all
-        else:
-            print(f"Going to execute {len(loads)*len(instances_list)} subexperiments in total..")
-            experiment_config = ExperimentConfig(
-                use_case=uc,
-                exp_id=exp_id,
-                dim_values=loads,
-                replicass=instances_list,
-                partitions=partitions,
-                cpu_limit=cpu_limit,
-                memory_limit=memory_limit,
-                kafka_streams_commit_interval_ms=commit_ms,
-                execution_minutes=duration,
-                domain_restriction_strategy=lower_bound_strategy,
-                search_strategy=check_all_strategy,
-                subexperiment_executor=subexperiment_executor,
-                subexperiment_evaluator=subexperiment_evaluator)
-    # no domain restriction
+        # domain restriction
+        domain_restriction_strategy = lower_bound_strategy
+    else:
+        # no domain restriction
+        domain_restriction_strategy = no_lower_bound_strategy
+
+    # select search strategy
+    if search_strategy == "linear-search":
+        print(
+            f"Going to execute at most {len(loads)+len(instances_list)-1} subexperiments in total..")
+        search_strategy = linear_search_strategy
+    elif search_strategy == "binary-search":
+        search_strategy = binary_search_strategy
     else:
-        # no domain restriction + linear-search
-        if search_strategy == "linear-search":
-            print(f"Going to execute at most {len(loads)*len(instances_list)} subexperiments in total..")
-            experiment_config = ExperimentConfig(
-                use_case=uc,
-                exp_id=exp_id,
-                dim_values=loads,
-                replicass=instances_list,
-                partitions=partitions,
-                cpu_limit=cpu_limit,
-                memory_limit=memory_limit,
-                kafka_streams_commit_interval_ms=commit_ms,
-                execution_minutes=duration,
-                domain_restriction_strategy=no_lower_bound_strategy,
-                search_strategy=linear_search_strategy,
-                subexperiment_executor=subexperiment_executor,
-                subexperiment_evaluator=subexperiment_evaluator)
-        # no domain restriction + binary-search
-        elif search_strategy == "binary-search":
-            experiment_config = ExperimentConfig(
-                use_case=uc,
-                exp_id=exp_id,
-                dim_values=loads,
-                replicass=instances_list,
-                partitions=partitions,
-                cpu_limit=cpu_limit,
-                memory_limit=memory_limit,
-                kafka_streams_commit_interval_ms=commit_ms,
-                execution_minutes=duration,
-                domain_restriction_strategy=no_lower_bound_strategy,
-                search_strategy=binary_search_strategy,
-                subexperiment_executor=subexperiment_executor,
-                subexperiment_evaluator=subexperiment_evaluator)
-        # no domain restriction + check_all
-        else:
-            print(f"Going to execute {len(loads)*len(instances_list)} subexperiments in total..")
-            experiment_config = ExperimentConfig(
-                use_case=uc,
-                exp_id=exp_id,
-                dim_values=loads,
-                replicass=instances_list,
-                partitions=partitions,
-                cpu_limit=cpu_limit,
-                memory_limit=memory_limit,
-                kafka_streams_commit_interval_ms=commit_ms,
-                execution_minutes=duration,
-                domain_restriction_strategy=no_lower_bound_strategy,
-                search_strategy=check_all_strategy,
-                subexperiment_executor=subexperiment_executor,
-                subexperiment_evaluator=subexperiment_evaluator)
+        print(
+            f"Going to execute {len(loads)*len(instances_list)} subexperiments in total..")
+        search_strategy = check_all_strategy
+
+    experiment_config = ExperimentConfig(
+        use_case=uc,
+        exp_id=exp_id,
+        dim_values=loads,
+        replicass=instances_list,
+        partitions=partitions,
+        cpu_limit=cpu_limit,
+        memory_limit=memory_limit,
+        execution_minutes=duration,
+        prometheus_base_url=prometheus_base_url,
+        reset=reset,
+        namespace=namespace,
+        configurations=configurations,
+        result_path=result_path,
+        domain_restriction_strategy=domain_restriction_strategy,
+        search_strategy=search_strategy,
+        subexperiment_executor=subexperiment_executor,
+        subexperiment_evaluator=subexperiment_evaluator)
 
     executor = ExperimentExecutor(experiment_config)
     executor.execute()
@@ -174,7 +117,12 @@ def main(uc, loads, instances_list, partitions, cpu_limit, memory_limit,
 if __name__ == '__main__':
     logging.basicConfig(level=logging.INFO)
     args = load_variables()
-    main(args.uc, args.loads, args.instances_list, args.partitions, args.cpu_limit,
-         args.memory_limit, args.commit_ms, args.duration,
-         args.domain_restriction, args.search_strategy, args.prometheus,
-         args.reset, args.reset_only, args.namespace)
+    if args.reset_only:
+        print('Only reset the cluster')
+        run_uc.main(None, None, None, None, None, None, None, None,
+                    None, None, args.namespace, None, None, reset_only=True)
+    else:
+        main(args.uc, args.loads, args.instances_list, args.partitions,
+             args.cpu_limit, args.memory_limit, args.duration,
+             args.domain_restriction, args.search_strategy, args.prometheus,
+             args.reset, args.namespace, args.path, args.configurations)
diff --git a/execution/theodolite.yaml b/execution/theodolite.yaml
index c083d327e99ef9af94f407ea4c2d1e789286ce74..68d53386bcf5e77ce08d964f3c04eb000794575c 100644
--- a/execution/theodolite.yaml
+++ b/execution/theodolite.yaml
@@ -5,65 +5,47 @@ metadata:
 spec:
   template:
     spec:
+      volumes:
+      - name: theodolite-pv-storage
+        persistentVolumeClaim:
+          claimName: theodolite-pv-claim
       containers:
-      - name: theodolite
-        image: bvonheid/theodolite:latest
-        # imagePullPolicy: Never # Used to pull "own" local image
-        env:
-        - name: UC
-          value: "1"
-        - name: LOADS
-          value: "13206, 19635"
-        - name: INSTANCES
-          value: "1, 2"
-        - name: DURATION
-          value: "3"
-        - name: PARTITIONS
-          value: "30"
-        # - name: COMMIT_MS
-        #   value: ""
-        # - name: SEARCH_STRATEGY
-        #   value: ""
-        # - name: CPU_LIMIT
-        #   value: ""
-        # - name: MEMORY_LIMIT
-        #   value: ""
-        - name: PROMETHEUS_BASE_URL
-          value: "http://prometheus-operated:9090"
-        # - name: NAMESPACE
-        #   value: "default"
-        - name: PYTHONUNBUFFERED
-          value: "1"
+        - name: theodolite
+          image: bvonheid/theodolite:latest
+          # imagePullPolicy: Never # Used to pull "own" local image
+          env:
+            - name: UC # mandatory
+              value: "1"
+            - name: LOADS # mandatory
+              value: "100000, 200000"
+            - name: INSTANCES # mandatory
+              value: "1, 2, 3"
+            # - name: DURATION
+            #   value: "5"
+            # - name: PARTITIONS
+            #   value: "40"
+            # - name: DOMAIN_RESTRICTION
+            #   value: "True"
+            # - name: SEARCH_STRATEGY
+            #   value: "linear-search"
+            # - name: CPU_LIMIT
+            #   value: "1000m"
+            # - name: MEMORY_LIMIT
+            #   value: "4Gi"
+            - name: PROMETHEUS_BASE_URL
+              value: "http://prometheus-operated:9090"
+            # - name: NAMESPACE
+            #   value: "default"
+            # - name: CONFIGURATIONS
+            #   value: "COMMIT_INTERVAL_MS=100, NUM_STREAM_THREADS=1"
+            - name: RESULT_PATH
+              value: "results"
+            - name: PYTHONUNBUFFERED # Enable logs in Kubernetes
+              value: "1"
+          volumeMounts:
+            - mountPath: "/app/results"
+              name: theodolite-pv-storage
       restartPolicy: Never
+      # Uncomment if RBAC is enabled and configured
+      # serviceAccountName: theodolite
   backoffLimit: 4
-
-# ---
-# apiVersion: v1
-# kind: ServiceAccount
-# metadata:
-#   name: theodolite
-# ---
-# apiVersion: rbac.authorization.k8s.io/v1
-# kind: Role
-# metadata:
-#   name: modify-pods
-# rules:
-#   - apiGroups: [""]
-#     resources:
-#       - pods
-#     verbs:
-#       - get
-#       - list
-#       - delete
-# ---
-# apiVersion: rbac.authorization.k8s.io/v1
-# kind: RoleBinding
-# metadata:
-#   name: modify-pods-to-sa
-# subjects:
-#   - kind: ServiceAccount
-#     name: theodolite
-# roleRef:
-#   kind: Role
-#   name: modify-pods
-#   apiGroup: rbac.authorization.k8s.io
diff --git a/execution/uc-application/base/aggregation-deployment.yaml b/execution/uc-application/base/aggregation-deployment.yaml
index 81da3eea7688f5d3b3145092d91cb8502e6ad87b..07732ca1dd1e6b2b06f098dfb10a53d38e8d5cae 100644
--- a/execution/uc-application/base/aggregation-deployment.yaml
+++ b/execution/uc-application/base/aggregation-deployment.yaml
@@ -20,14 +20,14 @@ spec:
         - containerPort: 5555
           name: jmx
         env:
-        - name: COMMIT_INTERVAL_MS
-          value: "100"
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
         - name: SCHEMA_REGISTRY_URL
           value: "http://my-confluent-cp-schema-registry:8081"
         - name: JAVA_OPTS
           value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+        - name: COMMIT_INTERVAL_MS # Set as default for the applications
+          value: "100"
         resources:
           limits:
             memory: 4Gi
diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar
deleted file mode 100644
index 29953ea141f55e3b8fc691d31b5ca8816d89fa87..0000000000000000000000000000000000000000
Binary files a/gradle/wrapper/gradle-wrapper.jar and /dev/null differ
diff --git a/uc3-application/src/main/java/theodolite/uc3/application/ConfigurationKeys.java b/uc3-application/src/main/java/theodolite/uc3/application/ConfigurationKeys.java
deleted file mode 100644
index ab6f08c017bb78a72c4896d766b38f7b8485c7fb..0000000000000000000000000000000000000000
--- a/uc3-application/src/main/java/theodolite/uc3/application/ConfigurationKeys.java
+++ /dev/null
@@ -1,29 +0,0 @@
-package theodolite.uc3.application;
-
-/**
- * Keys to access configuration parameters.
- */
-public final class ConfigurationKeys {
-
-  public static final String APPLICATION_NAME = "application.name";
-
-  public static final String APPLICATION_VERSION = "application.version";
-
-  public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
-
-  public static final String KAFKA_OUTPUT_TOPIC = "kafka.output.topic";
-
-  public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
-
-  public static final String NUM_THREADS = "num.threads";
-
-  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
-
-  public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
-
-  public static final String KAFKA_WINDOW_DURATION_MINUTES = "kafka.window.duration.minutes";
-
-  private ConfigurationKeys() {
-  }
-
-}