diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 90effd8588932aca1b1ff6591ccceeda1854908e..f7e431002e7bf214f377b7458d2eba235b7b6050 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -25,6 +25,11 @@ build:
   tags:
     - exec-docker
   script: ./gradlew --build-cache assemble
+  artifacts:
+    paths:
+      - "build/libs/*.jar"
+      - "*/build/distributions/*.tar"
+    expire_in: 1 day
 
 test:
   stage: test
@@ -76,7 +81,7 @@ spotbugs:
 .deploy:
   stage: deploy
   tags:
-    - exec-docker
+    - exec-dind
   # see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled
   # for image usage and settings for building with TLS and docker in docker
   image: docker:19.03.1
@@ -86,14 +91,157 @@ spotbugs:
     DOCKER_TLS_CERTDIR: "/certs"
   script:
     - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
-    - docker build --pull -t titan-ccp-exp-bigdata19-bridge ./exp-bigdata19-bridge
-    - docker tag titan-ccp-exp-bigdata19-bridge $DOCKERHUB_ORG/titan-ccp-exp-bigdata19-bridge:${DOCKER_TAG_NAME}latest
-    - docker tag titan-ccp-exp-bigdata19-bridge $DOCKERHUB_ORG/titan-ccp-exp-bigdata19-bridge:$DOCKER_TAG_NAME$CI_COMMIT_SHA
+    - docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME
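+    # Without a Git tag, tag the image as ${DOCKER_TAG_NAME}latest and with the short commit SHA; for tagged commits, use the Git tag as the Docker tag.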
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest"
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
+    - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$CI_COMMIT_TAG"
     - echo $DOCKERHUB_PW | docker login -u $DOCKERHUB_ID --password-stdin
-    - docker push $DOCKERHUB_ORG/titan-ccp-exp-bigdata19-bridge
+    - docker push $DOCKERHUB_ORG/$IMAGE_NAME
     - docker logout
-  only:
-    variables:
-      - $DOCKERHUB_ORG
-      - $DOCKERHUB_ID
-      - $DOCKERHUB_PW
+  rules:
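+    # Deploy automatically for Git tags and for changes to shared code; otherwise offer a manual job. All rules require the Docker Hub credentials and the image/project variables to be set.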
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      # - $JAVA_PROJECT_NAME/**/* # hope this can be simplified soon, see #51
+      - application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc1-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc1-kstreams-app"
+    JAVA_PROJECT_NAME: "uc1-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - uc1-application/**/*
+      - application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc2-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc2-kstreams-app"
+    JAVA_PROJECT_NAME: "uc2-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - uc2-application/**/*
+      - application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc3-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc3-kstreams-app"
+    JAVA_PROJECT_NAME: "uc3-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - uc3-application/**/*
+      - application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc4-kstreams-app:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc4-kstreams-app"
+    JAVA_PROJECT_NAME: "uc4-application"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - uc4-application/**/*
+      - application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc1-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc1-workload-generator"
+    JAVA_PROJECT_NAME: "uc1-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - uc1-workload-generator/**/*
+      - application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc2-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc2-workload-generator"
+    JAVA_PROJECT_NAME: "uc2-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - uc2-workload-generator/**/*
+      - application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc3-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc3-workload-generator"
+    JAVA_PROJECT_NAME: "uc3-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - uc3-workload-generator/**/*
+      - application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
+
+deploy-uc4-workload-generator:
+  extends: .deploy
+  variables:
+    IMAGE_NAME: "theodolite-uc4-workload-generator"
+    JAVA_PROJECT_NAME: "uc4-workload-generator"
+  rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - uc4-workload-generator/**/*
+      - application-kafkastreams-commons/**/*
+      if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: always
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
+      when: manual
+      allow_failure: true
diff --git a/.settings/org.eclipse.jdt.ui.prefs b/.settings/org.eclipse.jdt.ui.prefs
index 98b5ca8064a352aacfe2aebd13fbd0a87735fc3e..4e04e2891754324a6e1bf55348b6a38f592bb301 100644
--- a/.settings/org.eclipse.jdt.ui.prefs
+++ b/.settings/org.eclipse.jdt.ui.prefs
@@ -101,7 +101,7 @@ sp_cleanup.qualify_static_member_accesses_with_declaring_class=true
 sp_cleanup.qualify_static_method_accesses_with_declaring_class=false
 sp_cleanup.remove_private_constructors=true
 sp_cleanup.remove_redundant_modifiers=false
-sp_cleanup.remove_redundant_semicolons=false
+sp_cleanup.remove_redundant_semicolons=true
 sp_cleanup.remove_redundant_type_arguments=true
 sp_cleanup.remove_trailing_whitespaces=true
 sp_cleanup.remove_trailing_whitespaces_all=true
diff --git a/uc4-application/src/main/java/theodolite/uc4/application/ConfigurationKeys.java b/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
similarity index 67%
rename from uc4-application/src/main/java/theodolite/uc4/application/ConfigurationKeys.java
rename to application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
index bc5fee1f2cb4367284e9db60f575f2652b1bd05b..6302e4c69904aaf57e3f936ee9ad0ead11414a8d 100644
--- a/uc4-application/src/main/java/theodolite/uc4/application/ConfigurationKeys.java
+++ b/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
@@ -1,31 +1,46 @@
-package theodolite.uc4.application;
+package theodolite.commons.kafkastreams;
 
 /**
  * Keys to access configuration parameters.
  */
 public final class ConfigurationKeys {
-
+  // Common keys
   public static final String APPLICATION_NAME = "application.name";
 
   public static final String APPLICATION_VERSION = "application.version";
 
+  public static final String NUM_THREADS = "num.threads";
+
+  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
+
+  public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
+
   public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
 
+  public static final String SCHEMA_REGISTRY_URL = "schema.registry.url";
+
   public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
 
+  // Additional topics
+  public static final String KAFKA_FEEDBACK_TOPIC = "kafka.feedback.topic";
+
   public static final String KAFKA_OUTPUT_TOPIC = "kafka.output.topic";
 
-  public static final String AGGREGATION_DURATION_DAYS = "aggregation.duration.days";
+  public static final String KAFKA_CONFIGURATION_TOPIC = "kafka.configuration.topic";
 
-  public static final String AGGREGATION_ADVANCE_DAYS = "aggregation.advance.days";
+  // UC2
+  public static final String EMIT_PERIOD_MS = "emit.period.ms";
 
-  public static final String NUM_THREADS = "num.threads";
+  public static final String GRACE_PERIOD_MS = "grace.period.ms";
 
-  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
+  // UC3
+  public static final String KAFKA_WINDOW_DURATION_MINUTES = "kafka.window.duration.minutes";
 
-  public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
+  // UC4
+  public static final String AGGREGATION_DURATION_DAYS = "aggregation.duration.days";
+
+  public static final String AGGREGATION_ADVANCE_DAYS = "aggregation.advance.days";
 
-  public static final String KAFKA_WINDOW_DURATION_MINUTES = "kafka.window.duration.minutes";
 
   private ConfigurationKeys() {}
 
diff --git a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java b/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
index ae2a6dafa3d36dada927d17a1ca00d2df63db78b..8c758c24444ea9c590c364063a397f9b7bfec8f9 100644
--- a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
+++ b/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
@@ -13,6 +13,8 @@ import titan.ccp.common.kafka.streams.PropertiesBuilder;
 public abstract class KafkaStreamsBuilder {
 
   // Kafkastreams application specific
+  protected String schemaRegistryUrl; // NOPMD for use in subclasses
+
   private String applicationName; // NOPMD
   private String applicationVersion; // NOPMD
   private String bootstrapServers; // NOPMD
@@ -55,6 +57,17 @@ public abstract class KafkaStreamsBuilder {
     return this;
   }
 
+  /**
+   * Sets the URL for the schema registry.
+   *
+   * @param url The URL of the schema registry.
+   * @return This builder instance.
+   */
+  public KafkaStreamsBuilder schemaRegistry(final String url) {
+    this.schemaRegistryUrl = url;
+    return this;
+  }
+
   /**
    * Sets the Kafka Streams property for the number of threads (num.stream.threads). Can be minus
    * one for using the default.
@@ -131,9 +144,10 @@ public abstract class KafkaStreamsBuilder {
    */
   public KafkaStreams build() {
     // Check for required attributes for building properties.
-    Objects.requireNonNull(this.bootstrapServers, "Bootstrap server has not been set.");
     Objects.requireNonNull(this.applicationName, "Application name has not been set.");
     Objects.requireNonNull(this.applicationVersion, "Application version has not been set.");
+    Objects.requireNonNull(this.bootstrapServers, "Bootstrap server has not been set.");
+    Objects.requireNonNull(this.schemaRegistryUrl, "Schema registry URL has not been set.");
 
     // Create the Kafka streams instance.
     return new KafkaStreams(this.buildTopology(), this.buildProperties());
diff --git a/build.gradle b/build.gradle
index 694a127ca58774bbe8c243e74996e412488adbf0..1e388cb9665b43e004a1854248acc04e1cda387c 100644
--- a/build.gradle
+++ b/build.gradle
@@ -12,9 +12,10 @@ buildscript {
 
 // Variables used to distinct different subprojects
 def useCaseProjects = subprojects.findAll {it -> it.name.matches('uc(.)*')}
+def useCaseApplications = subprojects.findAll {it -> it.name.matches('uc[0-9]+-application')}
+def useCaseGenerators = subprojects.findAll {it -> it.name.matches('uc[0-9]+-workload-generator')}
 def commonProjects = subprojects.findAll {it -> it.name.matches('(.)*commons(.)*')}
 
-
 // Plugins
 allprojects {
   apply plugin: 'eclipse'
@@ -51,22 +52,22 @@ allprojects {
 	    maven {
 	    	url "https://oss.sonatype.org/content/repositories/snapshots/"
 	    }
+      maven {
+        url 'https://packages.confluent.io/maven/'
+      }
 	}
 }
 
-// Dependencies for all use cases
-configure(useCaseProjects) {
+// Dependencies for all use case applications
+configure(useCaseApplications) {
   dependencies {
-      // These dependencies is exported to consumers, that is to say found on their compile classpath.
-      api('org.industrial-devops:titan-ccp-common:0.0.3-SNAPSHOT') { changing = true }
-      api 'net.kieker-monitoring:kieker:1.14-SNAPSHOT'
-      api 'net.sourceforge.teetime:teetime:3.0'
-
       // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
-      implementation 'org.apache.kafka:kafka-clients:2.1.0'
+      implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
+      implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
+      implementation 'org.apache.kafka:kafka-streams:2.6.0' // enable TransformerSuppliers
       implementation 'com.google.guava:guava:24.1-jre'
       implementation 'org.jctools:jctools-core:2.1.1'
-      implementation 'org.slf4j:slf4j-simple:1.6.1'
+      implementation 'org.slf4j:slf4j-simple:1.7.25'
       implementation project(':application-kafkastreams-commons')
 
       // Use JUnit test framework
@@ -74,15 +75,31 @@ configure(useCaseProjects) {
   }
 }
 
+// Dependencies for all use case generators
+configure(useCaseGenerators) {
+  dependencies {
+      // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
+      implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
+      implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
+      implementation 'com.google.guava:guava:24.1-jre'
+      implementation 'org.jctools:jctools-core:2.1.1'
+      implementation 'org.slf4j:slf4j-simple:1.7.25'
+
+      // These dependencies are used for the workload-generator-commons project
+      implementation project(':workload-generator-commons')
+
+      // Use JUnit test framework
+      testImplementation 'junit:junit:4.12'
+  }
+}
+
 // Dependencies for all commons
 configure(commonProjects) {
   dependencies {
-      // These dependencies is exported to consumers, that is to say found on their compile classpath.
-      api 'org.apache.kafka:kafka-clients:2.4.0'
-
       // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
-      implementation 'org.slf4j:slf4j-simple:1.6.1'
-      implementation('org.industrial-devops:titan-ccp-common:0.0.3-SNAPSHOT') { changing = true }
+      implementation 'org.slf4j:slf4j-simple:1.7.25'
+      implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
+      implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
 
       // Use JUnit test framework
       testImplementation 'junit:junit:4.12'
diff --git a/docker-test/uc1-docker-compose/docker-compose.yml b/docker-test/uc1-docker-compose/docker-compose.yml
index ba288cb83cf649030577e6331fee49f46316ee52..d394255951151d931b73e4c923bb10ecaed66a2c 100755
--- a/docker-test/uc1-docker-compose/docker-compose.yml
+++ b/docker-test/uc1-docker-compose/docker-compose.yml
@@ -16,11 +16,11 @@ services:
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
       KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
   uc-app:
-    image: benediktwetzel/uc1-app:latest
+    image: theodolite/theodolite-uc1-kstreams-app:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
   uc-wg: 
-    image: benediktwetzel/uc1-wg:latest
+    image: theodolite/theodolite-uc1-workload-generator:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       NUM_SENSORS: 1
diff --git a/docker-test/uc2-docker-compose/docker-compose.yml b/docker-test/uc2-docker-compose/docker-compose.yml
index 20a7a73c99c102fe90fa3d4eaa9935dba5298a94..f730148a89d41a819d81a4770e0d53a960dbe493 100755
--- a/docker-test/uc2-docker-compose/docker-compose.yml
+++ b/docker-test/uc2-docker-compose/docker-compose.yml
@@ -16,11 +16,11 @@ services:
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
       KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
   uc-app:
-    image: benediktwetzel/uc2-app:latest
+    image: theodolite/theodolite-uc2-kstreams-app:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
   uc-wg: 
-    image: benediktwetzel/uc2-wg:latest
+    image: theodolite/theodolite-uc2-workload-generator:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       NUM_SENSORS: 1
\ No newline at end of file
diff --git a/docker-test/uc3-docker-compose/docker-compose.yml b/docker-test/uc3-docker-compose/docker-compose.yml
index 2cb0d883acc38e0d24434faf4e7af82ff3c42a81..2a3cb23a79f9edda699fe1bb07c1b922614aeb13 100755
--- a/docker-test/uc3-docker-compose/docker-compose.yml
+++ b/docker-test/uc3-docker-compose/docker-compose.yml
@@ -16,12 +16,12 @@ services:
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
       KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
   uc-app:
-    image: benediktwetzel/uc3-app:latest
+    image: theodolite/theodolite-uc3-kstreams-app:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       KAFKA_WINDOW_DURATION_MINUTES: 60
   uc-wg: 
-    image: benediktwetzel/uc3-wg:latest
+    image: theodolite/theodolite-uc3-workload-generator:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       NUM_SENSORS: 1
\ No newline at end of file
diff --git a/docker-test/uc4-docker-compose/docker-compose.yml b/docker-test/uc4-docker-compose/docker-compose.yml
index be945cefe92fe75503187fb6b94ff6c951e1b8f2..1f015f23b2e8b98eba27ae6f387adb123ae2ccc2 100755
--- a/docker-test/uc4-docker-compose/docker-compose.yml
+++ b/docker-test/uc4-docker-compose/docker-compose.yml
@@ -26,11 +26,11 @@ services:
       SCHEMA_REGISTRY_HOST_NAME: schema-registry
       SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
   uc-app:
-    image: soerenhenning/uc4-app:latest #TODO
+    image: theodolite/theodolite-uc4-kstreams-app:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       SCHEMA_REGISTRY_URL: http://schema-registry:8081
   uc-wg: 
-    image: soerenhenning/uc4-wg:latest #TODO
+    image: theodolite/theodolite-uc4-workload-generator:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
diff --git a/docs/release-process.md b/docs/release-process.md
new file mode 100644
index 0000000000000000000000000000000000000000..097890f5fb446f69902c0537fefe4f0f0a2c2bd5
--- /dev/null
+++ b/docs/release-process.md
@@ -0,0 +1,18 @@
+# Release Process
+
+We assume that we are creating the release `v0.1.1`. Please make sure to
+adjust the following steps according to the release you are actually
+performing.
+
+1. Create a new branch `v0.1` if it does not already exist. This branch will
+never again be merged into master.
+
+2. Check out the `v0.1` branch.
+
+3. Update all references to Theodolite Docker images to the tag `v0-1-1`. These are
+mainly the Kubernetes resource definitions in `execution` as well as the Docker
+Compose files in `docker-test`.
+
+4. Commit these changes.
+
+5. Tag this commit with `v0.1.1`. The corresponding Docker images will be uploaded.
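+
+A sketch of these steps as Git commands (assuming, as described above, that pushing the tag triggers the Docker image upload):
+
+```sh
+git checkout -b v0.1    # steps 1 and 2; omit -b if the branch already exists
+# step 3: update the Docker image tags to v0-1-1 in execution/ and docker-test/
+git commit -am "Use Docker image tag v0-1-1"    # step 4
+git tag v0.1.1                                  # step 5
+git push origin v0.1 v0.1.1
+```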
diff --git a/execution/README.md b/execution/README.md
index 89a851a9c8bafd29a4232f142e7b0da8b88c1132..7880211772fe48513b12266dd9468b0993f90391 100644
--- a/execution/README.md
+++ b/execution/README.md
@@ -102,7 +102,15 @@ Other Kafka deployments, for example, using Strimzi, should work in a similar wa
 A permanently running pod used for Kafka configuration is started via:
 
 ```sh
-kubectl apply -f infrastructure/kafka/kafka-client.yaml 
+kubectl apply -f infrastructure/kafka/kafka-client.yaml
+```
+
+#### A Zookeeper Client Pod
+
+Additionally, a permanently running pod for ZooKeeper access is started via:
+
+```sh
+kubectl apply -f infrastructure/zookeeper-client.yaml
 ```
 
 #### The Kafka Lag Exporter
@@ -142,15 +150,15 @@ Depending on your setup, some additional adjustments may be necessary:
 
 ## Execution
 
-The `./run_loop.sh` is the entrypoint for all benchmark executions. Is has to be called as follows:
+The `./theodolite.sh` script is the entrypoint for all benchmark executions. It has to be called as follows:
 
 ```sh
-./run_loop.sh <use-case> <wl-values> <instances> <partitions> <cpu-limit> <memory-limit> <commit-interval> <duration>
+./theodolite.sh <use-case> <wl-values> <instances> <partitions> <cpu-limit> <memory-limit> <commit-interval> <duration>
 ```
 
 * `<use-case>`: Stream processing use case to be benchmarked. Has to be one of `1`, `2`, `3` or `4`.
-* `<wl-values>`: Values for the workload generator to be tested, separated by commas. For example `100000, 200000, 300000`.
-* `<instances>`: Numbers of instances to be benchmarked, separated by commas. For example `1, 2, 3, 4`.
+* `<wl-values>`: Values for the workload generator to be tested, separated by commas and quoted. For example `"100000, 200000, 300000"`.
+* `<instances>`: Numbers of instances to be benchmarked, separated by commas and quoted. For example `"1, 2, 3, 4"`.
 * `<partitions>`: Number of partitions for Kafka topics. Optional. Default `40`.
 * `<cpu-limit>`: Kubernetes CPU limit. Optional. Default `1000m`.
 * `<memory-limit>`: Kubernetes memory limit. Optional. Default `4Gi`.
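+
+For example, use case 1 could be benchmarked with three workload intensities and up to four instances, relying on the defaults for the remaining parameters (illustrative values):
+
+```sh
+./theodolite.sh 1 "100000, 200000, 300000" "1, 2, 4"
+```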
diff --git a/execution/infrastructure/grafana/values.yaml b/execution/infrastructure/grafana/values.yaml
index 16f075745660e9fd522f37108d0479a8b6f997b4..211a72a61a2699c7108ec4adb9a7edebbccecb69 100644
--- a/execution/infrastructure/grafana/values.yaml
+++ b/execution/infrastructure/grafana/values.yaml
@@ -47,8 +47,7 @@ sidecar:
     # If specified, the sidecar will search for datasource config-maps inside this namespace.
     # Otherwise the namespace in which the sidecar is running will be used.
     # It's also possible to specify ALL to search in all namespaces
-    searchNamespace: default
-
+    searchNamespace: null
 
 service:
   nodePort: 31199
diff --git a/execution/infrastructure/kafka/kafka-client.yaml b/execution/infrastructure/kafka/kafka-client.yaml
index 4c7d3ed239faed62022c110e92b264b338a8c9a4..a03e76c0a8215fee5bf1c512b859ea8501b9df0f 100644
--- a/execution/infrastructure/kafka/kafka-client.yaml
+++ b/execution/infrastructure/kafka/kafka-client.yaml
@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: Pod
 metadata:
-  name: kafka-client-2
+  name: kafka-client
 spec:
   containers:
   - name: kafka-client
diff --git a/execution/infrastructure/prometheus/cluster-role-binding.yaml b/execution/infrastructure/prometheus/cluster-role-binding.yaml
index 5369e02aac84440053b3be5485f0644419d981d1..db2717cddcea180f84bb68377ba6daad37c33296 100644
--- a/execution/infrastructure/prometheus/cluster-role-binding.yaml
+++ b/execution/infrastructure/prometheus/cluster-role-binding.yaml
@@ -9,4 +9,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: prometheus
-  namespace: titan-scalability
\ No newline at end of file
+  namespace: default
\ No newline at end of file
diff --git a/execution/infrastructure/zookeeper-client.yaml b/execution/infrastructure/zookeeper-client.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9d6d00275c46a888ed4e8ff08533ca245c4a684c
--- /dev/null
+++ b/execution/infrastructure/zookeeper-client.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: zookeeper-client
+spec:
+  containers:
+  - name: zookeeper-client
+    image: confluentinc/cp-zookeeper:5.4.0
+    command:
+      - sh
+      - -c
+      - "exec tail -f /dev/null"
diff --git a/execution/run_uc1-new.sh b/execution/run_uc1.sh
similarity index 84%
rename from execution/run_uc1-new.sh
rename to execution/run_uc1.sh
index 136a395793d981b3a8f0a2582951a513f6157698..9ab082a523e2deed3e872dc24e4624d328c8fa2f 100755
--- a/execution/run_uc1-new.sh
+++ b/execution/run_uc1.sh
@@ -56,6 +56,7 @@ kind: Deployment
 metadata:
   name: titan-ccp-aggregation
 spec:
+  replicas: $REPLICAS
   template:
     spec:
       containers:
@@ -69,7 +70,6 @@ spec:
             cpu: $CPU_LIMIT
 EOF
 kubectl apply -k uc-application/overlay/uc1-application
-kubectl scale deployment uc1-titan-ccp-aggregation --replicas=$REPLICAS
 
 # Execute for certain time
 sleep $(($EXECUTION_MINUTES * 60))
@@ -97,7 +97,7 @@ echo "Finished execution, print topics:"
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
 while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
 do
-    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*'"
+    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*' --if-exists"
     echo "Wait for topic deletion"
     sleep 5s
     #echo "Finished waiting, print topics:"
@@ -106,6 +106,33 @@ do
 done
 echo "Finish topic deletion, print topics:"
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+
+# delete zookeeper nodes used for workload generation
+echo "Delete ZooKeeper configurations used for workload generation"
+kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation"
+echo "Waiting for deletion"
+
+while true
+do
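+    # Parse the last line of the zookeeper-shell output, which lists the root znodes as "[a, b, ...]", into a bash array.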
+    IFS=', ' read -r -a array <<< $(kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 ls /" | tail -n 1 | awk -F[\]\[] '{print $2}')
+    found=0
+    for element in "${array[@]}"
+    do
+        if [ "$element" == "workload-generation" ]; then
+                found=1
+                break
+        fi
+    done
+    if [ $found -ne 1 ]; then
+        echo "ZooKeeper reset was successful."
+        break
+    else
+        echo "ZooKeeper reset was not successful. Retrying in 5s."
+        sleep 5s
+    fi
+done
+echo "Deletion finished"
+
 echo "Exiting script"
 
 KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
diff --git a/execution/run_uc2-new.sh b/execution/run_uc2.sh
similarity index 73%
rename from execution/run_uc2-new.sh
rename to execution/run_uc2.sh
index 88db87087e2386744c26366171addc46aaf73443..741acaa89e84b3e58ee0bfaceddd4cca08081f20 100755
--- a/execution/run_uc2-new.sh
+++ b/execution/run_uc2.sh
@@ -22,17 +22,21 @@ echo "EXECUTION_MINUTES: $EXECUTION_MINUTES"
 #PARTITIONS=40
 #kubectl run temp-kafka --rm --attach --restart=Never --image=solsson/kafka --command -- bash -c "./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1"
 PARTITIONS=$PARTITIONS
-kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1"
+kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic aggregation-feedback --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1"
 
 # Start workload generator
 NUM_NESTED_GROUPS=$DIM_VALUE
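+# Approximately 4^NUM_NESTED_GROUPS sensors are simulated; the number of generator instances is the ceiling of sensors / WL_MAX_RECORDS.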
+WL_MAX_RECORDS=150000
+APPROX_NUM_SENSORS=$((4**NUM_NESTED_GROUPS))
+WL_INSTANCES=$(((APPROX_NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
+
 cat <<EOF >uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: titan-ccp-load-generator
 spec:
-  replicas: 1
+  replicas: $WL_INSTANCES
   template:
     spec:
       containers:
@@ -44,6 +48,8 @@ spec:
           value: "full"
         - name: NUM_NESTED_GROUPS
           value: "$NUM_NESTED_GROUPS"
+        - name: INSTANCES
+          value: "$WL_INSTANCES"
 EOF
 kubectl apply -k uc-workload-generator/overlay/uc2-workload-generator
 
@@ -55,6 +61,7 @@ kind: Deployment
 metadata:
   name: titan-ccp-aggregation
 spec:
+  replicas: $REPLICAS
   template:
     spec:
       containers:
@@ -68,7 +75,6 @@ spec:
             cpu: $CPU_LIMIT
 EOF
 kubectl apply -k uc-application/overlay/uc2-application
-kubectl scale deployment uc2-titan-ccp-aggregation --replicas=$REPLICAS
 
 # Execute for certain time
 sleep $(($EXECUTION_MINUTES * 60))
@@ -94,9 +100,9 @@ kubectl delete -k uc-application/overlay/uc2-application
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
 echo "Finished execution, print topics:"
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
-while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
+while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|aggregation-feedback|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
 do
-    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*'"
+    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|aggregation-feedback|output|configuration|theodolite-.*' --if-exists"
     echo "Wait for topic deletion"
     sleep 5s
     #echo "Finished waiting, print topics:"
@@ -105,6 +111,33 @@ do
 done
 echo "Finish topic deletion, print topics:"
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+
+# delete zookeeper nodes used for workload generation
+echo "Delete ZooKeeper configurations used for workload generation"
+kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation"
+echo "Waiting for deletion"
+
+while true
+do
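+    # Parse the last line of the zookeeper-shell output, which lists the root znodes as "[a, b, ...]", into a bash array.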
+    IFS=', ' read -r -a array <<< $(kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 ls /" | tail -n 1 | awk -F[\]\[] '{print $2}')
+    found=0
+    for element in "${array[@]}"
+    do
+        if [ "$element" == "workload-generation" ]; then
+                found=1
+                break
+        fi
+    done
+    if [ $found -ne 1 ]; then
+        echo "ZooKeeper reset was successful."
+        break
+    else
+        echo "ZooKeeper reset was not successful. Retrying in 5s."
+        sleep 5s
+    fi
+done
+echo "Deletion finished"
+
 echo "Exiting script"
 
 KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
diff --git a/execution/run_uc3-new.sh b/execution/run_uc3.sh
similarity index 85%
rename from execution/run_uc3-new.sh
rename to execution/run_uc3.sh
index 46084c7a889a29019de0b5edbe9c9bbf61fe329a..ff61e7587557e54e7f2af2cdba9f1a0983231c6a 100755
--- a/execution/run_uc3-new.sh
+++ b/execution/run_uc3.sh
@@ -57,6 +57,7 @@ kind: Deployment
 metadata:
   name: titan-ccp-aggregation
 spec:
+  replicas: $REPLICAS
   template:
     spec:
       containers:
@@ -97,7 +98,7 @@ echo "Finished execution, print topics:"
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
 while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
 do
-    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*'"
+    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*' --if-exists"
     echo "Wait for topic deletion"
     sleep 5s
     #echo "Finished waiting, print topics:"
@@ -106,6 +107,33 @@ do
 done
 echo "Finish topic deletion, print topics:"
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+
+# delete zookeeper nodes used for workload generation
+echo "Delete ZooKeeper configurations used for workload generation"
+kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation"
+echo "Waiting for deletion"
+
+while true
+do
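+    # Parse the last line of the zookeeper-shell output, which lists the root znodes as "[a, b, ...]", into a bash array.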
+    IFS=', ' read -r -a array <<< $(kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 ls /" | tail -n 1 | awk -F[\]\[] '{print $2}')
+    found=0
+    for element in "${array[@]}"
+    do
+        if [ "$element" == "workload-generation" ]; then
+                found=1
+                break
+        fi
+    done
+    if [ $found -ne 1 ]; then
+        echo "ZooKeeper reset was successful."
+        break
+    else
+        echo "ZooKeeper reset was not successful. Retrying in 5s."
+        sleep 5s
+    fi
+done
+echo "Deletion finished"
+
 echo "Exiting script"
 
 KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
diff --git a/execution/run_uc4-new.sh b/execution/run_uc4.sh
similarity index 80%
rename from execution/run_uc4-new.sh
rename to execution/run_uc4.sh
index 8c7eef3a762a90b3b63b9569f6d956ec5603c71e..4d558dca3beab211475bf787f9293b13411c33e0 100755
--- a/execution/run_uc4-new.sh
+++ b/execution/run_uc4.sh
@@ -26,13 +26,16 @@ kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-z
 
 # Start workload generator
 NUM_SENSORS=$DIM_VALUE
-cat <<EOF >uuc-workload-generator/overlay/c4-workload-generator/set_paramters.yaml
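+# The number of generator instances is the ceiling of NUM_SENSORS / WL_MAX_RECORDS.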
+WL_MAX_RECORDS=150000
+WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
+
+cat <<EOF >uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml
 apiVersion: apps/v1
 kind: StatefulSet
 metadata:
   name: titan-ccp-load-generator
 spec:
-  replicas: 1
+  replicas: $WL_INSTANCES
   template:
     spec:
       containers:
@@ -40,6 +43,8 @@ spec:
         env:
         - name: NUM_SENSORS
           value: "$NUM_SENSORS"
+        - name: INSTANCES
+          value: "$WL_INSTANCES"
 EOF
 kubectl apply -k uc-workload-generator/overlay/uc4-workload-generator
 
@@ -51,6 +56,7 @@ kind: Deployment
 metadata:
   name: titan-ccp-aggregation
 spec:
+  replicas: $REPLICAS
   template:
     spec:
       containers:
@@ -78,7 +84,6 @@ deactivate
 kubectl delete -k uc-workload-generator/overlay/uc4-workload-generator
 kubectl delete -k uc-application/overlay/uc4-application
 
-
 # Delete topics instead of Kafka
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
 # kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic '.*'
@@ -92,7 +97,7 @@ echo "Finished execution, print topics:"
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
 while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
 do
-    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*'"
+    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*' --if-exists"
     echo "Wait for topic deletion"
     sleep 5s
     #echo "Finished waiting, print topics:"
@@ -101,6 +106,33 @@ do
 done
 echo "Finish topic deletion, print topics:"
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+
+# delete zookeeper nodes used for workload generation
+echo "Delete ZooKeeper configurations used for workload generation"
+kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation"
+echo "Waiting for deletion"
+
+while true
+do
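+    # Parse the last line of the zookeeper-shell output, which lists the root znodes as "[a, b, ...]", into a bash array.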
+    IFS=', ' read -r -a array <<< $(kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 ls /" | tail -n 1 | awk -F[\]\[] '{print $2}')
+    found=0
+    for element in "${array[@]}"
+    do
+        if [ "$element" == "workload-generation" ]; then
+                found=1
+                break
+        fi
+    done
+    if [ $found -ne 1 ]; then
+        echo "ZooKeeper reset was successful."
+        break
+    else
+        echo "ZooKeeper reset was not successful. Retrying in 5s."
+        sleep 5s
+    fi
+done
+echo "Deletion finished"
+
 echo "Exiting script"
 
 KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
diff --git a/execution/run_loop.sh b/execution/theodolite.sh
similarity index 88%
rename from execution/run_loop.sh
rename to execution/theodolite.sh
index b139ad6ff3e1950baa3d7f4579f574f7231ecb5f..18a6b67a9c321cd1c0ecebca405169ec5b8ade46 100755
--- a/execution/run_loop.sh
+++ b/execution/theodolite.sh
@@ -35,7 +35,7 @@ do
     do
         SUBEXPERIMENT_COUNTER=$((SUBEXPERIMENT_COUNTER+1))
         echo "Run subexperiment $SUBEXPERIMENT_COUNTER/$SUBEXPERIMENTS with config: $DIM_VALUE $REPLICA"
-        ./run_uc$UC-new.sh $EXP_ID $DIM_VALUE $REPLICA $PARTITIONS $CPU_LIMIT $MEMORY_LIMIT $KAFKA_STREAMS_COMMIT_INTERVAL_MS $EXECUTION_MINUTES
+        ./run_uc$UC.sh $EXP_ID $DIM_VALUE $REPLICA $PARTITIONS $CPU_LIMIT $MEMORY_LIMIT $KAFKA_STREAMS_COMMIT_INTERVAL_MS $EXECUTION_MINUTES
         sleep 10s
     done
 done
diff --git a/execution/uc-application/base/aggregation-deployment.yaml b/execution/uc-application/base/aggregation-deployment.yaml
index 19a63fc4265ff1ff3eb814faec53b365fdb94304..70f50e0b4a18d8dfd66967aec7d0154c8a6fd285 100644
--- a/execution/uc-application/base/aggregation-deployment.yaml
+++ b/execution/uc-application/base/aggregation-deployment.yaml
@@ -22,6 +22,8 @@ spec:
         env:
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
+        - name: SCHEMA_REGISTRY_URL
+          value: "http://my-confluent-cp-schema-registry:8081"
         - name: COMMIT_INTERVAL_MS
           value: "100"
         - name: JAVA_OPTS
diff --git a/execution/uc-application/overlay/uc1-application/kustomization.yaml b/execution/uc-application/overlay/uc1-application/kustomization.yaml
index 5dc746cc26ab31e96f925717b2145a3e3b89aee3..0d3820fe392e1d2224d78a8dd2415c4dce37c6e6 100644
--- a/execution/uc-application/overlay/uc1-application/kustomization.yaml
+++ b/execution/uc-application/overlay/uc1-application/kustomization.yaml
@@ -5,7 +5,7 @@ namePrefix: uc1-
 
 images:
   - name: uc-app
-    newName: soerenhenning/uc1-app
+    newName: theodolite/theodolite-uc1-kstreams-app
     newTag: latest
 
 bases:
diff --git a/execution/uc-application/overlay/uc1-application/set_paramters.yaml b/execution/uc-application/overlay/uc1-application/set_paramters.yaml
index 57611fabd1a89a8655ce03b9cb8a059e1f695e77..cb85048128774ab421b89338d5b1ce23791acac8 100644
--- a/execution/uc-application/overlay/uc1-application/set_paramters.yaml
+++ b/execution/uc-application/overlay/uc1-application/set_paramters.yaml
@@ -3,6 +3,7 @@ kind: Deployment
 metadata:
   name: titan-ccp-aggregation
 spec:
+  replicas: 1
   template:
     spec:
       containers:
diff --git a/execution/uc-application/overlay/uc2-application/kustomization.yaml b/execution/uc-application/overlay/uc2-application/kustomization.yaml
index 8c62daebd716c79451ce065c3e9d73bc96e8ff25..cd32cabf70fdfa666a5703c97bc4e4fad7800ba7 100644
--- a/execution/uc-application/overlay/uc2-application/kustomization.yaml
+++ b/execution/uc-application/overlay/uc2-application/kustomization.yaml
@@ -5,7 +5,7 @@ namePrefix: uc2-
 
 images:
   - name: uc-app
-    newName: soerenhenning/uc2-app
+    newName: theodolite/theodolite-uc2-kstreams-app
     newTag: latest
 
 bases:
diff --git a/execution/uc-application/overlay/uc2-application/set_paramters.yaml b/execution/uc-application/overlay/uc2-application/set_paramters.yaml
index 57611fabd1a89a8655ce03b9cb8a059e1f695e77..cb85048128774ab421b89338d5b1ce23791acac8 100644
--- a/execution/uc-application/overlay/uc2-application/set_paramters.yaml
+++ b/execution/uc-application/overlay/uc2-application/set_paramters.yaml
@@ -3,6 +3,7 @@ kind: Deployment
 metadata:
   name: titan-ccp-aggregation
 spec:
+  replicas: 1
   template:
     spec:
       containers:
diff --git a/execution/uc-application/overlay/uc3-application/kustomization.yaml b/execution/uc-application/overlay/uc3-application/kustomization.yaml
index 0a898a9ca0ecd98e1ea411ce9997aed020351428..5722cbca8cc79247063921a55252435804edefe6 100644
--- a/execution/uc-application/overlay/uc3-application/kustomization.yaml
+++ b/execution/uc-application/overlay/uc3-application/kustomization.yaml
@@ -5,7 +5,7 @@ namePrefix: uc3-
 
 images:
   - name: uc-app
-    newName: soerenhenning/uc3-app
+    newName: theodolite/theodolite-uc3-kstreams-app
     newTag: latest
 
 bases:
diff --git a/execution/uc-application/overlay/uc3-application/set_paramters.yaml b/execution/uc-application/overlay/uc3-application/set_paramters.yaml
index d25865a7f39e29aaa3f6f22d19b682cc78179de0..cb85048128774ab421b89338d5b1ce23791acac8 100644
--- a/execution/uc-application/overlay/uc3-application/set_paramters.yaml
+++ b/execution/uc-application/overlay/uc3-application/set_paramters.yaml
@@ -3,13 +3,14 @@ kind: Deployment
 metadata:
   name: titan-ccp-aggregation
 spec:
+  replicas: 1
   template:
     spec:
       containers:
       - name: uc-application
         env:
         - name: COMMIT_INTERVAL_MS
-          value: 100
+          value: "100"
         resources:
           limits:
             memory: 4Gi
diff --git a/execution/uc-application/overlay/uc4-application/kustomization.yaml b/execution/uc-application/overlay/uc4-application/kustomization.yaml
index 00ad540e66b0ccfd1f4322c6f04951faf344b3bd..b44a9bb643802735b740b74bdb47299fb413e5d3 100644
--- a/execution/uc-application/overlay/uc4-application/kustomization.yaml
+++ b/execution/uc-application/overlay/uc4-application/kustomization.yaml
@@ -5,7 +5,7 @@ namePrefix: uc4-
 
 images:
   - name: uc-app
-    newName: soerenhenning/uc4-app
+    newName: theodolite/theodolite-uc4-kstreams-app
     newTag: latest
 
 bases:
diff --git a/execution/uc-application/overlay/uc4-application/set_paramters.yaml b/execution/uc-application/overlay/uc4-application/set_paramters.yaml
index 57611fabd1a89a8655ce03b9cb8a059e1f695e77..cb85048128774ab421b89338d5b1ce23791acac8 100644
--- a/execution/uc-application/overlay/uc4-application/set_paramters.yaml
+++ b/execution/uc-application/overlay/uc4-application/set_paramters.yaml
@@ -3,6 +3,7 @@ kind: Deployment
 metadata:
   name: titan-ccp-aggregation
 spec:
+  replicas: 1
   template:
     spec:
       containers:
diff --git a/execution/uc-workload-generator/base/workloadGenerator.yaml b/execution/uc-workload-generator/base/workloadGenerator.yaml
index 71d349de66ba92953dc6030aee47f5206cc3cb06..1f21776c7f33bb4348decf8099f6f97669d33047 100644
--- a/execution/uc-workload-generator/base/workloadGenerator.yaml
+++ b/execution/uc-workload-generator/base/workloadGenerator.yaml
@@ -1,5 +1,5 @@
 apiVersion: apps/v1
-kind: StatefulSet
+kind: Deployment
 metadata:
   name: titan-ccp-load-generator
 spec:
@@ -16,10 +16,16 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: workload-generator
-        image: workload-generator:latest
+        image: uc-workload-generator:latest
         env:
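+        # ZooKeeper connection used to coordinate the workload generator instances.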
+        - name: ZK_HOST
+          value: "my-confluent-cp-zookeeper"
+        - name: ZK_PORT
+          value: "2181"
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
+        - name: SCHEMA_REGISTRY_URL
+          value: "http://my-confluent-cp-schema-registry:8081"
         - name: NUM_SENSORS
           value: "25000"
         - name: INSTANCES
diff --git a/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml
index 564468c06d4423410ba072ab1818751f37b8999f..553b769a3bacd3356d6b5af5ba2e865acdd47a7c 100644
--- a/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml
+++ b/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml
@@ -5,7 +5,7 @@ namePrefix: uc1-
 
 images:
   - name: workload-generator
-    newName: soerenhenning/uc1-wg
+    newName: theodolite/theodolite-uc1-workload-generator
     newTag: latest
 
 bases:
diff --git a/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml
index 61fc37cfb3b840a349c75b2b9c82ea21f804632a..ff68743355d55459f2df988e8dd42bf0b3b6ae64 100644
--- a/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml
+++ b/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml
@@ -5,7 +5,7 @@ namePrefix: uc2-
 
 images:
   - name: workload-generator
-    newName: soerenhenning/uc2-wg
+    newName: theodolite/theodolite-uc2-workload-generator
     newTag: latest
 
 bases:
diff --git a/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml
index 0f17f661eccbe1412d7b5a9d5d299c2f5167db52..9fdac91cba8e31f36cf9778c5114a123a8868f35 100644
--- a/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml
+++ b/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml
@@ -14,5 +14,6 @@ spec:
         - name: HIERARCHY
           value: "full"
         - name: NUM_NESTED_GROUPS
-          value: "25000"
-
+          value: "5"
+        - name: INSTANCES
+          value: "1"
diff --git a/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml
index fb794489de4ff4e0892edb48a464bd6fb18edeb2..a7022480fcfe401f3e4e4c3898c3d79930198d3e 100644
--- a/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml
+++ b/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml
@@ -5,7 +5,7 @@ namePrefix: uc3-
 
 images:
   - name: workload-generator
-    newName: soerenhenning/uc3-wg
+    newName: theodolite/theodolite-uc3-workload-generator
     newTag: latest
 
 bases:
diff --git a/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml
index acf6833ba909fb286fabe83abd78cfec3ada6d71..5efb0eb25a26371cdddfcc7969a2d10131dbb448 100644
--- a/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml
+++ b/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml
@@ -5,7 +5,7 @@ namePrefix: uc4-
 
 images:
   - name: workload-generator
-    newName: soerenhenning/uc4-wg
+    newName: theodolite/theodolite-uc4-workload-generator
     newTag: latest
 
 bases:
diff --git a/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml
index 50ca424fbac14502ed51ed441d4a1787fc9b1063..a63afa46fe0ecc4602ace583cc4fa0a7e4943366 100644
--- a/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml
+++ b/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml
@@ -11,3 +11,5 @@ spec:
         env:
         - name: NUM_SENSORS
           value: "25000"
+        - name: INSTANCES
+          value: "1"
diff --git a/settings.gradle b/settings.gradle
index 51112256b1a124d07ad80caf7ac0ccaf697858d3..9104525ce160a25957f9731f820a723b4f36f7d5 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -1,5 +1,6 @@
 rootProject.name = 'scalability-benchmarking'
 
+include 'workload-generator-commons'
 include 'application-kafkastreams-commons'
 
 include 'uc1-workload-generator'
diff --git a/uc1-application/src/main/java/theodolite/uc1/application/ConfigurationKeys.java b/uc1-application/src/main/java/theodolite/uc1/application/ConfigurationKeys.java
deleted file mode 100644
index ee4113c3088629fe01988721e32d9704f5d30da5..0000000000000000000000000000000000000000
--- a/uc1-application/src/main/java/theodolite/uc1/application/ConfigurationKeys.java
+++ /dev/null
@@ -1,25 +0,0 @@
-package theodolite.uc1.application;
-
-/**
- * Keys to access configuration parameters.
- */
-public final class ConfigurationKeys {
-
-  public static final String APPLICATION_NAME = "application.name";
-
-  public static final String APPLICATION_VERSION = "application.version";
-
-  public static final String NUM_THREADS = "num.threads";
-
-  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
-
-  public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
-
-  public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
-
-  public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
-
-
-  private ConfigurationKeys() {}
-
-}
diff --git a/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java b/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
index b551fb7f8ff74f5ddc7e3aad901c1412075c6da6..a35cc37b36fb906e5c5495006126374d4de4656c 100644
--- a/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
+++ b/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
@@ -3,8 +3,9 @@ package theodolite.uc1.application;
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
+import theodolite.commons.kafkastreams.ConfigurationKeys;
 import theodolite.uc1.streamprocessing.Uc1KafkaStreamsBuilder;
-import titan.ccp.common.configuration.Configurations;
+import titan.ccp.common.configuration.ServiceConfigurations;
 
 /**
  * A microservice that manages the history and, therefore, stores and aggregates incoming
@@ -13,7 +14,7 @@ import titan.ccp.common.configuration.Configurations;
  */
 public class HistoryService {
 
-  private final Configuration config = Configurations.create();
+  private final Configuration config = ServiceConfigurations.createWithDefaults();
 
   private final CompletableFuture<Void> stopEvent = new CompletableFuture<>();
 
@@ -40,6 +41,7 @@ public class HistoryService {
         .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
         .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
         .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
+        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
         .build();
 
     this.stopEvent.thenRun(kafkaStreams::close);
diff --git a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java b/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
index 824a8dadd4d80dd29d09b21543fa6da6aedf5365..1c30e0c2c83b3d8a2f3dca4df0c7aec99cc4f450 100644
--- a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
+++ b/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
@@ -7,8 +7,8 @@ import org.apache.kafka.streams.Topology;
 import org.apache.kafka.streams.kstream.Consumed;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-import titan.ccp.models.records.ActivePowerRecordFactory;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
+import titan.ccp.model.records.ActivePowerRecord;
 
 /**
  * Builds Kafka Stream Topology for the History microservice.
@@ -18,14 +18,18 @@ public class TopologyBuilder {
   private static final Logger LOGGER = LoggerFactory.getLogger(TopologyBuilder.class);
 
   private final String inputTopic;
+  private final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory;
+
   private final Gson gson = new Gson();
   private final StreamsBuilder builder = new StreamsBuilder();
 
   /**
    * Create a new {@link TopologyBuilder} using the given topics.
    */
-  public TopologyBuilder(final String inputTopic) {
+  public TopologyBuilder(final String inputTopic,
+      final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory) {
     this.inputTopic = inputTopic;
+    this.srAvroSerdeFactory = srAvroSerdeFactory;
   }
 
   /**
@@ -35,7 +40,7 @@ public class TopologyBuilder {
     this.builder
         .stream(this.inputTopic, Consumed.with(
             Serdes.String(),
-            IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
+            this.srAvroSerdeFactory.<ActivePowerRecord>forValues()))
         .mapValues(v -> this.gson.toJson(v))
         .foreach((k, v) -> LOGGER.info("Key: " + k + " Value: " + v));
 
diff --git a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java b/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
index 4af3f130373d0596232921b9c5cc0b48df573b72..7699ecb48369a2041777b901931c46072a10d99f 100644
--- a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
+++ b/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
@@ -3,6 +3,7 @@ package theodolite.uc1.streamprocessing;
 import java.util.Objects;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
 
 /**
  * Builder for the Kafka Streams configuration.
@@ -18,6 +19,7 @@ public class Uc1KafkaStreamsBuilder extends KafkaStreamsBuilder {
   @Override
   protected Topology buildTopology() {
     Objects.requireNonNull(this.inputTopic, "Input topic has not been set.");
-    return new TopologyBuilder(this.inputTopic).build();
+    return new TopologyBuilder(this.inputTopic,
+        new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl)).build();
   }
 }
diff --git a/uc1-application/src/main/resources/META-INF/application.properties b/uc1-application/src/main/resources/META-INF/application.properties
index 9dcbb9a64be111c2ea1db006081b983c9007b140..3fb301516daa4c7e14875d3d9ca9df9c770eb69e 100644
--- a/uc1-application/src/main/resources/META-INF/application.properties
+++ b/uc1-application/src/main/resources/META-INF/application.properties
@@ -5,6 +5,8 @@ kafka.bootstrap.servers=localhost:9092
 kafka.input.topic=input
 kafka.output.topic=output
 
+schema.registry.url=http://localhost:8091
+
 num.threads=1
 commit.interval.ms=100
 cache.max.bytes.buffering=-1
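With the shared ConfigurationKeys moved to application-kafkastreams-commons, these properties resolve the same way across services. A minimal, hypothetical sketch (not project code) of reading the new schema.registry.url default, mirroring HistoryService above:

    import org.apache.commons.configuration2.Configuration;
    import theodolite.commons.kafkastreams.ConfigurationKeys;
    import titan.ccp.common.configuration.ServiceConfigurations;

    public final class ConfigSketch {

      private ConfigSketch() {}

      public static void main(final String[] args) {
        // Defaults are loaded from META-INF/application.properties.
        final Configuration config = ServiceConfigurations.createWithDefaults();
        final String schemaRegistryUrl =
            config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL);
        System.out.println("Using Schema Registry at " + schemaRegistryUrl);
      }
    }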
diff --git a/uc1-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java b/uc1-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
deleted file mode 100644
index bf562d86ac913138f48da79c4542d9583b1c8390..0000000000000000000000000000000000000000
--- a/uc1-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package theodolite.kafkasender;
-
-import java.util.Properties;
-import java.util.function.Function;
-import kieker.common.record.IMonitoringRecord;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-
-
-/**
- * Sends monitoring records to Kafka.
- *
- * @param <T> {@link IMonitoringRecord} to send
- */
-public class KafkaRecordSender<T extends IMonitoringRecord> {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(KafkaRecordSender.class);
-
-  private final String topic;
-
-  private final Function<T, String> keyAccessor;
-
-  private final Function<T, Long> timestampAccessor;
-
-  private final Producer<String, T> producer;
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic) {
-    this(bootstrapServers, topic, x -> "", x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor) {
-    this(bootstrapServers, topic, keyAccessor, x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor) {
-    this(bootstrapServers, topic, keyAccessor, timestampAccessor, new Properties());
-  }
-
-  /**
-   * Create a new {@link KafkaRecordSender}.
-   */
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor,
-      final Properties defaultProperties) {
-    this.topic = topic;
-    this.keyAccessor = keyAccessor;
-    this.timestampAccessor = timestampAccessor;
-
-    final Properties properties = new Properties();
-    properties.putAll(defaultProperties);
-    properties.put("bootstrap.servers", bootstrapServers);
-    // properties.put("acks", this.acknowledges);
-    // properties.put("batch.size", this.batchSize);
-    // properties.put("linger.ms", this.lingerMs);
-    // properties.put("buffer.memory", this.bufferMemory);
-
-    this.producer = new KafkaProducer<>(properties, new StringSerializer(),
-        IMonitoringRecordSerde.serializer());
-  }
-
-  /**
-   * Write the passed monitoring record to Kafka.
-   */
-  public void write(final T monitoringRecord) {
-    final ProducerRecord<String, T> record =
-        new ProducerRecord<>(this.topic, null, this.timestampAccessor.apply(monitoringRecord),
-            this.keyAccessor.apply(monitoringRecord), monitoringRecord);
-
-    LOGGER.debug("Send record to Kafka topic {}: {}", this.topic, record);
-    this.producer.send(record);
-  }
-
-  public void terminate() {
-    this.producer.close();
-  }
-
-}
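For orientation, a minimal sketch of the commons sender that replaces this deleted class, mirroring the builder usage in the new LoadGenerator below (addresses and topic are placeholders; write() and terminate() are assumed to keep the deleted class's API):

    import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender;
    import titan.ccp.model.records.ActivePowerRecord;

    public final class SenderSketch {

      private SenderSketch() {}

      public static void main(final String[] args) {
        // Key and timestamp accessors replace the old constructor overloads.
        final KafkaRecordSender<ActivePowerRecord> sender =
            new KafkaRecordSender.Builder<ActivePowerRecord>(
                "localhost:9092", "input", "http://localhost:8091")
                    .keyAccessor(r -> r.getIdentifier())
                    .timestampAccessor(r -> r.getTimestamp())
                    .build();
        sender.write(new ActivePowerRecord("s_0", System.currentTimeMillis(), 10.0));
        sender.terminate();
      }
    }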
diff --git a/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java b/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
index bcff74b9a5a4efc72ce1f206f5f10c13557eafd7..a7b27dfdb25760f0b96c930c9705c2eed0402442 100644
--- a/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
+++ b/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
@@ -1,92 +1,94 @@
 package theodolite.uc1.workloadgenerator;
 
 import java.io.IOException;
-import java.util.List;
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
 import java.util.Objects;
 import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import theodolite.kafkasender.KafkaRecordSender;
-import titan.ccp.models.records.ActivePowerRecord;
+import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGenerator;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGeneratorBuilder;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
+import titan.ccp.model.records.ActivePowerRecord;
 
-public class LoadGenerator {
+/**
+ * Load Generator for UC1.
+ */
+public final class LoadGenerator {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class);
 
-  private static final int WL_MAX_RECORDS = 150_000;
+  private static final long MAX_DURATION_IN_DAYS = 30L;
 
+  private LoadGenerator() {}
+
+  /**
+   * Entry point.
+   */
   public static void main(final String[] args) throws InterruptedException, IOException {
+    // uc1
     LOGGER.info("Start workload generator for use case UC1.");
 
+    // get environment variables
+    final String zooKeeperHost = Objects.requireNonNullElse(System.getenv("ZK_HOST"), "localhost");
+    final int zooKeeperPort =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("ZK_PORT"), "2181"));
     final int numSensors =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10"));
-    final int instanceId = getInstanceId();
     final int periodMs =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-    final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-    final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
+    final double value =
+        Double.parseDouble(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+    final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"),
+        "4"));
     final String kafkaBootstrapServers =
         Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
+    final String schemaRegistryUrl =
+        Objects.requireNonNullElse(System.getenv("SCHEMA_REGISTRY_URL"), "http://localhost:8091");
     final String kafkaInputTopic =
         Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
     final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
     final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
     final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+    final int instances =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("INSTANCES"), "1"));
 
-    final int idStart = instanceId * WL_MAX_RECORDS;
-    final int idEnd = Math.min((instanceId + 1) * WL_MAX_RECORDS, numSensors);
-    LOGGER.info("Generating data for sensors with IDs from {} to {} (exclusive).", idStart, idEnd);
-    final List<String> sensors = IntStream.range(idStart, idEnd)
-        .mapToObj(i -> "s_" + i)
-        .collect(Collectors.toList());
-
+    // create kafka record sender
     final Properties kafkaProperties = new Properties();
     // kafkaProperties.put("acks", this.acknowledges);
     kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
     kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
     kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(
-        kafkaBootstrapServers,
-        kafkaInputTopic,
-        r -> r.getIdentifier(),
-        r -> r.getTimestamp(),
-        kafkaProperties);
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-    final Random random = new Random();
 
-    for (final String sensor : sensors) {
-      final int initialDelay = random.nextInt(periodMs);
-      executor.scheduleAtFixedRate(() -> {
-        kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
-      }, initialDelay, periodMs, TimeUnit.MILLISECONDS);
-    }
+    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender =
+        new KafkaRecordSender.Builder<ActivePowerRecord>(
+            kafkaBootstrapServers,
+            kafkaInputTopic,
+            schemaRegistryUrl)
+                .keyAccessor(r -> r.getIdentifier())
+                .timestampAccessor(r -> r.getTimestamp())
+                .defaultProperties(kafkaProperties)
+                .build();
 
-    System.out.println("Wait for termination...");
-    executor.awaitTermination(30, TimeUnit.DAYS);
-    System.out.println("Will terminate now");
+    // create workload generator
+    final KafkaWorkloadGenerator<ActivePowerRecord> workloadGenerator =
+        KafkaWorkloadGeneratorBuilder.<ActivePowerRecord>builder()
+            .instances(instances)
+            .keySpace(new KeySpace("s_", numSensors))
+            .threads(threads)
+            .period(Duration.of(periodMs, ChronoUnit.MILLIS))
+            .duration(Duration.of(MAX_DURATION_IN_DAYS, ChronoUnit.DAYS))
+            .generatorFunction(
+                sensor -> new ActivePowerRecord(sensor, System.currentTimeMillis(), value))
+            .zooKeeper(new ZooKeeper(zooKeeperHost, zooKeeperPort))
+            .kafkaRecordSender(kafkaRecordSender)
+            .build();
 
+    // start
+    workloadGenerator.start();
   }
-
-  private static int getInstanceId() {
-    final String podName = System.getenv("POD_NAME");
-    if (podName == null) {
-      return 0;
-    } else {
-      return Pattern.compile("-")
-          .splitAsStream(podName)
-          .reduce((p, x) -> x)
-          .map(Integer::parseInt)
-          .orElse(0);
-    }
-  }
-
 }
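The per-key generator function is now the only use-case-specific part of the workload generator. A standalone sketch of what it produces for a single key (assuming the KeySpace enumerates keys as s_0, s_1, ..., as the removed code did):

    import java.util.function.Function;
    import titan.ccp.model.records.ActivePowerRecord;

    public final class GeneratorFunctionSketch {

      private GeneratorFunctionSketch() {}

      public static void main(final String[] args) {
        final double value = 10;
        // Same shape of function as passed to generatorFunction(...) above.
        final Function<String, ActivePowerRecord> generatorFunction =
            sensor -> new ActivePowerRecord(sensor, System.currentTimeMillis(), value);
        System.out.println(generatorFunction.apply("s_0"));
      }
    }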
diff --git a/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java b/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
index 06a6d9ccbf6750290335cd7389391eb613b1569a..c094adfcd7952e81115dae84ed9c0d371e380c98 100644
--- a/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
+++ b/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
@@ -4,8 +4,9 @@ import java.time.Duration;
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
+import theodolite.commons.kafkastreams.ConfigurationKeys;
 import theodolite.uc2.streamprocessing.Uc2KafkaStreamsBuilder;
-import titan.ccp.common.configuration.Configurations;
+import titan.ccp.common.configuration.ServiceConfigurations;
 
 /**
  * A microservice that manages the history and, therefore, stores and aggregates incoming
@@ -14,7 +15,7 @@ import titan.ccp.common.configuration.Configurations;
  */
 public class AggregationService {
 
-  private final Configuration config = Configurations.create();
+  private final Configuration config = ServiceConfigurations.createWithDefaults();
 
   private final CompletableFuture<Void> stopEvent = new CompletableFuture<>();
 
@@ -39,16 +40,18 @@ public class AggregationService {
     final Uc2KafkaStreamsBuilder uc2KafkaStreamsBuilder = new Uc2KafkaStreamsBuilder();
     uc2KafkaStreamsBuilder
         .inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC))
+        .feedbackTopic(this.config.getString(ConfigurationKeys.KAFKA_FEEDBACK_TOPIC))
         .outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC))
-        .configurationTopic(this.config.getString(ConfigurationKeys.CONFIGURATION_KAFKA_TOPIC))
-        .windowSize(Duration.ofMillis(this.config.getLong(ConfigurationKeys.WINDOW_SIZE_MS)))
-        .gracePeriod(Duration.ofMillis(this.config.getLong(ConfigurationKeys.WINDOW_GRACE_MS)));
+        .configurationTopic(this.config.getString(ConfigurationKeys.KAFKA_CONFIGURATION_TOPIC))
+        .emitPeriod(Duration.ofMillis(this.config.getLong(ConfigurationKeys.EMIT_PERIOD_MS)))
+        .gracePeriod(Duration.ofMillis(this.config.getLong(ConfigurationKeys.GRACE_PERIOD_MS)));
 
     // Configuration of the stream application
     final KafkaStreams kafkaStreams = uc2KafkaStreamsBuilder
         .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
         .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
         .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
+        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
         .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
         .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
         .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
diff --git a/uc2-application/src/main/java/theodolite/uc2/application/ConfigurationKeys.java b/uc2-application/src/main/java/theodolite/uc2/application/ConfigurationKeys.java
deleted file mode 100644
index 78d72af1d3eb3585606d349166f6bafdf1048b48..0000000000000000000000000000000000000000
--- a/uc2-application/src/main/java/theodolite/uc2/application/ConfigurationKeys.java
+++ /dev/null
@@ -1,31 +0,0 @@
-package theodolite.uc2.application;
-
-/**
- * Keys to access configuration parameters.
- */
-public final class ConfigurationKeys {
-  public static final String APPLICATION_NAME = "application.name";
-
-  public static final String APPLICATION_VERSION = "application.version";
-
-  public static final String CONFIGURATION_KAFKA_TOPIC = "configuration.kafka.topic";
-
-  public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
-
-  public static final String KAFKA_OUTPUT_TOPIC = "kafka.output.topic";
-
-  public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
-
-  public static final String WINDOW_SIZE_MS = "window.size.ms";
-
-  public static final String WINDOW_GRACE_MS = "window.grace.ms";
-
-  public static final String NUM_THREADS = "num.threads";
-
-  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
-
-  public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
-
-  private ConfigurationKeys() {}
-
-}
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerFactory.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerFactory.java
deleted file mode 100644
index 3060fdaaf2605766df93b767e50e426c5ebafae9..0000000000000000000000000000000000000000
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerFactory.java
+++ /dev/null
@@ -1,52 +0,0 @@
-package theodolite.uc2.streamprocessing;
-
-import java.util.Map;
-import java.util.Optional;
-import java.util.Set;
-import org.apache.kafka.common.serialization.Serdes;
-import org.apache.kafka.streams.KeyValue;
-import org.apache.kafka.streams.kstream.TransformerSupplier;
-import org.apache.kafka.streams.state.KeyValueStore;
-import org.apache.kafka.streams.state.StoreBuilder;
-import org.apache.kafka.streams.state.Stores;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.model.sensorregistry.SensorRegistry;
-
-/**
- * Factory class configuration required by {@link ChildParentsTransformer}.
- */
-public class ChildParentsTransformerFactory {
-
-  private static final String STORE_NAME = "CHILD-PARENTS-TRANSFORM-STATE";
-
-  /**
-   * Returns a {@link TransformerSupplier} for {@link ChildParentsTransformer}.
-   */
-  public TransformerSupplier<Event, SensorRegistry, Iterable<KeyValue<String, Optional<Set<String>>>>> getTransformerSupplier() { // NOCS
-    return new TransformerSupplier<>() {
-      @Override
-      public ChildParentsTransformer get() {
-        return new ChildParentsTransformer(STORE_NAME);
-      }
-    };
-  }
-
-  /**
-   * Returns a {@link StoreBuilder} for {@link ChildParentsTransformer}.
-   */
-  public StoreBuilder<KeyValueStore<String, Set<String>>> getStoreBuilder() {
-    return Stores.keyValueStoreBuilder(
-        Stores.persistentKeyValueStore(STORE_NAME),
-        Serdes.String(),
-        ParentsSerde.serde())
-        .withLoggingEnabled(Map.of());
-  }
-
-  /**
-   * Returns the store name for {@link ChildParentsTransformer}.
-   */
-  public String getStoreName() {
-    return STORE_NAME;
-  }
-
-}
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java
new file mode 100644
index 0000000000000000000000000000000000000000..2b2d71c2f95d052cee19394e3e62e674776f8627
--- /dev/null
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java
@@ -0,0 +1,40 @@
+package theodolite.uc2.streamprocessing;
+
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.streams.KeyValue;
+import org.apache.kafka.streams.kstream.Transformer;
+import org.apache.kafka.streams.kstream.TransformerSupplier;
+import org.apache.kafka.streams.state.KeyValueStore;
+import org.apache.kafka.streams.state.StoreBuilder;
+import org.apache.kafka.streams.state.Stores;
+import titan.ccp.configuration.events.Event;
+import titan.ccp.model.sensorregistry.SensorRegistry;
+
+/**
+ * Supplier class for a {@link ChildParentsTransformer}.
+ */
+public class ChildParentsTransformerSupplier implements
+    TransformerSupplier<Event, SensorRegistry, Iterable<KeyValue<String, Optional<Set<String>>>>> {
+
+  private static final String STORE_NAME = "CHILD-PARENTS-TRANSFORM-STATE";
+
+  @Override
+  public Transformer<Event, SensorRegistry, Iterable<KeyValue<String, Optional<Set<String>>>>> get() { // NOCS
+    return new ChildParentsTransformer(STORE_NAME);
+  }
+
+  @Override
+  public Set<StoreBuilder<?>> stores() {
+    final StoreBuilder<KeyValueStore<String, Set<String>>> store = Stores.keyValueStoreBuilder(
+        Stores.persistentKeyValueStore(STORE_NAME),
+        Serdes.String(),
+        ParentsSerde.serde())
+        .withLoggingEnabled(Map.of());
+
+    return Set.of(store);
+  }
+
+}
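Replacing the former factory with a TransformerSupplier that overrides stores() lets Kafka Streams register the state store automatically, so the explicit addStateStore() calls disappear from the TopologyBuilder below. A self-contained sketch of the same pattern with plain String/Long types (illustration only, not project code):

    import java.util.Set;
    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.KeyValue;
    import org.apache.kafka.streams.kstream.Transformer;
    import org.apache.kafka.streams.kstream.TransformerSupplier;
    import org.apache.kafka.streams.processor.ProcessorContext;
    import org.apache.kafka.streams.state.KeyValueStore;
    import org.apache.kafka.streams.state.StoreBuilder;
    import org.apache.kafka.streams.state.Stores;

    public class CountingTransformerSupplier
        implements TransformerSupplier<String, String, KeyValue<String, Long>> {

      private static final String STORE_NAME = "COUNTS";

      @Override
      public Transformer<String, String, KeyValue<String, Long>> get() {
        return new Transformer<>() {

          private KeyValueStore<String, Long> store;

          @Override
          @SuppressWarnings("unchecked")
          public void init(final ProcessorContext context) {
            this.store = (KeyValueStore<String, Long>) context.getStateStore(STORE_NAME);
          }

          @Override
          public KeyValue<String, Long> transform(final String key, final String value) {
            // Count how often each key was seen, backed by the declared store.
            final Long previous = this.store.get(key);
            final long count = previous == null ? 1L : previous + 1L;
            this.store.put(key, count);
            return KeyValue.pair(key, count);
          }

          @Override
          public void close() {
            // Nothing to do.
          }
        };
      }

      @Override
      public Set<StoreBuilder<?>> stores() {
        // Declaring the store here makes stream.transform(new CountingTransformerSupplier())
        // wire it up without any StreamsBuilder#addStateStore(...) call.
        final StoreBuilder<KeyValueStore<String, Long>> store = Stores.keyValueStoreBuilder(
            Stores.persistentKeyValueStore(STORE_NAME),
            Serdes.String(),
            Serdes.Long());
        return Set.of(store);
      }
    }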
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
index 0555df96c153065ecf9be2bf2ead10de60d55cbf..724c7f6e2eaebc7be53f03b89d143d885c4a055c 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
@@ -9,7 +9,7 @@ import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.kstream.Transformer;
 import org.apache.kafka.streams.processor.ProcessorContext;
 import org.apache.kafka.streams.state.KeyValueStore;
-import titan.ccp.models.records.ActivePowerRecord;
+import titan.ccp.model.records.ActivePowerRecord;
 
 /**
  * Transforms the join result of an {@link ActivePowerRecord} and the corresponding sensor parents
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerFactory.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerFactory.java
deleted file mode 100644
index b78eec51e1cd9e717f79b075e5e27230af56dbe7..0000000000000000000000000000000000000000
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerFactory.java
+++ /dev/null
@@ -1,50 +0,0 @@
-package theodolite.uc2.streamprocessing;
-
-import java.util.Map;
-import java.util.Set;
-import org.apache.kafka.common.serialization.Serdes;
-import org.apache.kafka.streams.KeyValue;
-import org.apache.kafka.streams.kstream.TransformerSupplier;
-import org.apache.kafka.streams.state.KeyValueStore;
-import org.apache.kafka.streams.state.StoreBuilder;
-import org.apache.kafka.streams.state.Stores;
-import titan.ccp.models.records.ActivePowerRecord;
-
-/**
- * Factory class configuration required by {@link JointFlatTransformerFactory}.
- */
-public class JointFlatTransformerFactory {
-
-  private static final String STORE_NAME = "JOINT-FLAT-MAP-TRANSFORM-STATE";
-
-  /**
-   * Returns a {@link TransformerSupplier} for {@link JointFlatTransformer}.
-   */
-  public TransformerSupplier<String, JointRecordParents, Iterable<KeyValue<SensorParentKey, ActivePowerRecord>>> getTransformerSupplier() { // NOCS
-    return new TransformerSupplier<>() {
-      @Override
-      public JointFlatTransformer get() {
-        return new JointFlatTransformer(STORE_NAME);
-      }
-    };
-  }
-
-  /**
-   * Returns a {@link StoreBuilder} for {@link JointFlatTransformer}.
-   */
-  public StoreBuilder<KeyValueStore<String, Set<String>>> getStoreBuilder() {
-    return Stores.keyValueStoreBuilder(
-        Stores.persistentKeyValueStore(STORE_NAME),
-        Serdes.String(),
-        ParentsSerde.serde())
-        .withLoggingEnabled(Map.of());
-  }
-
-  /**
-   * Returns the store name for {@link JointFlatTransformer}.
-   */
-  public String getStoreName() {
-    return STORE_NAME;
-  }
-
-}
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java
new file mode 100644
index 0000000000000000000000000000000000000000..7d9a7df3d465260623abef2b13e9f3765925bc57
--- /dev/null
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java
@@ -0,0 +1,38 @@
+package theodolite.uc2.streamprocessing;
+
+import java.util.Map;
+import java.util.Set;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.streams.KeyValue;
+import org.apache.kafka.streams.kstream.Transformer;
+import org.apache.kafka.streams.kstream.TransformerSupplier;
+import org.apache.kafka.streams.state.KeyValueStore;
+import org.apache.kafka.streams.state.StoreBuilder;
+import org.apache.kafka.streams.state.Stores;
+import titan.ccp.model.records.ActivePowerRecord;
+
+/**
+ * Supplier class for a {@link JointFlatTransformer}.
+ */
+public class JointFlatTransformerSupplier implements
+    TransformerSupplier<String, JointRecordParents, Iterable<KeyValue<SensorParentKey, ActivePowerRecord>>> { // NOCS
+
+  private static final String STORE_NAME = "JOINT-FLAT-MAP-TRANSFORM-STATE";
+
+  @Override
+  public Transformer<String, JointRecordParents, Iterable<KeyValue<SensorParentKey, ActivePowerRecord>>> get() { // NOCS
+    return new JointFlatTransformer(STORE_NAME);
+  }
+
+  @Override
+  public Set<StoreBuilder<?>> stores() {
+    final StoreBuilder<KeyValueStore<String, Set<String>>> store = Stores.keyValueStoreBuilder(
+        Stores.persistentKeyValueStore(STORE_NAME),
+        Serdes.String(),
+        ParentsSerde.serde())
+        .withLoggingEnabled(Map.of());
+
+    return Set.of(store);
+  }
+
+}
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
index 02b7318587a77228e7fb2f7dc1b3350bac532c89..cba05f1ed8e585d5c31aaa92207e0d2854436736 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
@@ -1,7 +1,8 @@
 package theodolite.uc2.streamprocessing;
 
+import java.util.Objects;
 import java.util.Set;
-import titan.ccp.models.records.ActivePowerRecord;
+import titan.ccp.model.records.ActivePowerRecord;
 
 /**
  * A joined pair of an {@link ActivePowerRecord} and its associated parents. Both the record and the
@@ -26,6 +27,27 @@ public class JointRecordParents {
     return this.record;
   }
 
+  @Override
+  public String toString() {
+    return "{" + this.parents + ", " + this.record + "}";
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(this.parents, this.record);
+  }
 
+  @Override
+  public boolean equals(final Object obj) {
+    if (obj == this) {
+      return true;
+    }
+    if (obj instanceof JointRecordParents) {
+      final JointRecordParents other = (JointRecordParents) obj;
+      return Objects.equals(this.parents, other.parents)
+          && Objects.equals(this.record, other.record);
+    }
+    return false;
+  }
 
 }
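A hedged sketch of the kind of unit test these new equals()/hashCode() implementations enable (JUnit 4, as used by the serde tests in this project; this test itself is hypothetical):

    import java.util.Set;
    import org.junit.Assert;
    import org.junit.Test;
    import theodolite.uc2.streamprocessing.JointRecordParents;
    import titan.ccp.model.records.ActivePowerRecord;

    public class JointRecordParentsTest {

      @Test
      public void equalFieldsImplyEqualityAndEqualHashCodes() {
        final Set<String> parents = Set.of("root");
        final ActivePowerRecord record = new ActivePowerRecord("s_0", 0L, 10.0);
        final JointRecordParents first = new JointRecordParents(parents, record);
        final JointRecordParents second = new JointRecordParents(parents, record);
        Assert.assertEquals(first, second);
        Assert.assertEquals(first.hashCode(), second.hashCode());
      }
    }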
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
index 10fb98c9c575bde508a7e24c9e825b25475eff76..9564e994da8fc909147bec76097c737f14247868 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
@@ -1,8 +1,8 @@
 package theodolite.uc2.streamprocessing;
 
 import org.apache.kafka.streams.kstream.Windowed;
-import titan.ccp.models.records.ActivePowerRecord;
-import titan.ccp.models.records.AggregatedActivePowerRecord;
+import titan.ccp.model.records.ActivePowerRecord;
+import titan.ccp.model.records.AggregatedActivePowerRecord;
 
 /**
  * Updates an {@link AggregatedActivePowerRecord} by a new {@link ActivePowerRecord}.
@@ -19,7 +19,7 @@ public class RecordAggregator {
     final double average = count == 0 ? 0.0 : sum / count;
     return new AggregatedActivePowerRecord(
         identifier.key(), record.getTimestamp(),
-        0.0, 0.0, count, sum, average);
+        count, sum, average);
   }
 
   /**
@@ -32,8 +32,7 @@ public class RecordAggregator {
     final double average = count == 0 ? 0.0 : sum / count;
     return new AggregatedActivePowerRecord(
-        // TODO timestamp -1 indicates that this record is emitted by an substract event
-        identifier.key(), -1,
-        0.0, 0.0, count, sum, average);
+        // TODO timestamp -1 indicates that this record is emitted by a substract event
+        identifier.key(), -1L, count, sum, average);
   }
 
 }
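To make the bookkeeping concrete: add() and substract() only maintain a count and a sum, from which the average is derived. A tiny standalone example of the arithmetic:

    public final class AggregationArithmetic {

      private AggregationArithmetic() {}

      public static void main(final String[] args) {
        long count = 0;
        double sum = 0.0;
        // Two child sensors report 10 W and 30 W:
        count++; sum += 10.0; // count=1, sum=10.0, average=10.0
        count++; sum += 30.0; // count=2, sum=40.0, average=20.0
        // The 10 W reading is substracted again (e.g., replaced by a newer value):
        count--; sum -= 10.0; // count=1, sum=30.0, average=30.0
        final double average = count == 0 ? 0.0 : sum / count;
        System.out.printf("count=%d, sum=%.1f, average=%.1f%n", count, sum, average);
      }
    }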
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
index d65c93034a0fc9a801cf5be0c2f7f50e38d9178e..a4fb5b33966882b94d46c96282bdaaed92d67ebd 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
@@ -1,5 +1,7 @@
 package theodolite.uc2.streamprocessing;
 
+import java.util.Objects;
+
 /**
  * A key consisting of the identifier of a sensor and an identifier of parent sensor.
  */
@@ -27,4 +29,22 @@ public class SensorParentKey {
     return "{" + this.sensorIdentifier + ", " + this.parentIdentifier + "}";
   }
 
+  @Override
+  public int hashCode() {
+    return Objects.hash(this.sensorIdentifier, this.parentIdentifier);
+  }
+
+  @Override
+  public boolean equals(final Object obj) {
+    if (obj == this) {
+      return true;
+    }
+    if (obj instanceof SensorParentKey) {
+      final SensorParentKey other = (SensorParentKey) obj;
+      return Objects.equals(this.sensorIdentifier, other.sensorIdentifier)
+          && Objects.equals(this.parentIdentifier, other.parentIdentifier);
+    }
+    return false;
+  }
+
 }
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
index b6c46fa3a1822cbf1a11e3a8399aa7a061283952..c09fa3ead7553bda5cd8e8f09079f846b89d5d17 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
@@ -1,6 +1,5 @@
 package theodolite.uc2.streamprocessing;
 
-import com.google.common.math.StatsAccumulator;
 import java.time.Duration;
 import java.util.Set;
 import org.apache.kafka.common.serialization.Serdes;
@@ -18,46 +17,61 @@ import org.apache.kafka.streams.kstream.Suppressed.BufferConfig;
 import org.apache.kafka.streams.kstream.TimeWindows;
 import org.apache.kafka.streams.kstream.Windowed;
 import org.apache.kafka.streams.kstream.WindowedSerdes;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
 import titan.ccp.configuration.events.Event;
 import titan.ccp.configuration.events.EventSerde;
+import titan.ccp.model.records.ActivePowerRecord;
+import titan.ccp.model.records.AggregatedActivePowerRecord;
 import titan.ccp.model.sensorregistry.SensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-import titan.ccp.models.records.ActivePowerRecordFactory;
-import titan.ccp.models.records.AggregatedActivePowerRecord;
-import titan.ccp.models.records.AggregatedActivePowerRecordFactory;
 
 /**
- * Builds Kafka Stream Topology for the History microservice.
+ * Builds Kafka Stream Topology for the Aggregation microservice.
  */
 public class TopologyBuilder {
-
-  // private static final Logger LOGGER = LoggerFactory.getLogger(TopologyBuilder.class);
-
+  // Topology configuration
   private final String inputTopic;
+  private final String feedbackTopic;
   private final String outputTopic;
   private final String configurationTopic;
-  private final Duration windowSize;
+  private final Duration emitPeriod;
   private final Duration gracePeriod;
 
+  // SERDEs
+  private final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory;
+
   private final StreamsBuilder builder = new StreamsBuilder();
   private final RecordAggregator recordAggregator = new RecordAggregator();
 
-
   /**
    * Create a new {@link TopologyBuilder} using the given topics.
+   *
+   * @param inputTopic The topic from which sensor measurements are read.
+   * @param outputTopic The topic to which aggregation results are published.
+   * @param feedbackTopic The topic to which aggregation results are also written, so they can be
+   *        consumed again for higher-level aggregations.
+   * @param configurationTopic The topic on which the sensor hierarchy is published.
+   * @param emitPeriod The period with which aggregation results are emitted.
+   * @param gracePeriod The period for which late-arriving records are still considered.
+   * @param srAvroSerdeFactory Factory for creating Avro serdes.
    */
   public TopologyBuilder(final String inputTopic, final String outputTopic,
-      final String configurationTopic, final Duration windowSize, final Duration gracePeriod) {
+      final String feedbackTopic, final String configurationTopic,
+      final Duration emitPeriod, final Duration gracePeriod,
+      final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory) {
     this.inputTopic = inputTopic;
-    this.outputTopic = outputTopic;
+    this.feedbackTopic = feedbackTopic;
     this.configurationTopic = configurationTopic;
-    this.windowSize = windowSize;
+    this.outputTopic = outputTopic;
+    this.emitPeriod = emitPeriod;
     this.gracePeriod = gracePeriod;
+
+    this.srAvroSerdeFactory = srAvroSerdeFactory;
   }
 
   /**
-   * Build the {@link Topology} for the History microservice.
+   * Build the {@link Topology} for the Aggregation microservice.
    */
   public Topology build() {
     // 1. Build Parent-Sensor Table
@@ -71,9 +85,12 @@ public class TopologyBuilder {
         this.buildLastValueTable(parentSensorTable, inputTable);
 
     // 4. Build Aggregations Stream
-    final KStream<String, AggregatedActivePowerRecord> aggregations =
+    final KTable<Windowed<String>, AggregatedActivePowerRecord> aggregations =
         this.buildAggregationStream(lastValueTable);
 
+    // 5. Expose Feedback Stream
+    this.exposeFeedbackStream(aggregations);
+
-    // 5. Expose Aggregations Stream
+    // 6. Expose Aggregations Stream
     this.exposeOutputStream(aggregations);
 
@@ -84,21 +101,22 @@ public class TopologyBuilder {
     final KStream<String, ActivePowerRecord> values = this.builder
         .stream(this.inputTopic, Consumed.with(
             Serdes.String(),
-            IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())));
+            this.srAvroSerdeFactory.forValues()));
+
     final KStream<String, ActivePowerRecord> aggregationsInput = this.builder
-        .stream(this.outputTopic, Consumed.with(
+        .stream(this.feedbackTopic, Consumed.with(
             Serdes.String(),
-            IMonitoringRecordSerde.serde(new AggregatedActivePowerRecordFactory())))
+            this.srAvroSerdeFactory.<AggregatedActivePowerRecord>forValues()))
         .mapValues(r -> new ActivePowerRecord(r.getIdentifier(), r.getTimestamp(), r.getSumInW()));
 
     final KTable<String, ActivePowerRecord> inputTable = values
         .merge(aggregationsInput)
-        .mapValues((k, v) -> new ActivePowerRecord(v.getIdentifier(), System.currentTimeMillis(),
-            v.getValueInW()))
-        .groupByKey(Grouped.with(Serdes.String(),
-            IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
-        .reduce((aggr, value) -> value, Materialized.with(Serdes.String(),
-            IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())));
+        .groupByKey(Grouped.with(
+            Serdes.String(),
+            this.srAvroSerdeFactory.forValues()))
+        .reduce((aggr, value) -> value, Materialized.with(
+            Serdes.String(),
+            this.srAvroSerdeFactory.forValues()));
     return inputTable;
   }
 
@@ -108,15 +126,9 @@ public class TopologyBuilder {
         .filter((key, value) -> key == Event.SENSOR_REGISTRY_CHANGED
             || key == Event.SENSOR_REGISTRY_STATUS);
 
-    final ChildParentsTransformerFactory childParentsTransformerFactory =
-        new ChildParentsTransformerFactory();
-    this.builder.addStateStore(childParentsTransformerFactory.getStoreBuilder());
-
     return configurationStream
         .mapValues(data -> SensorRegistry.fromJson(data))
-        .flatTransform(
-            childParentsTransformerFactory.getTransformerSupplier(),
-            childParentsTransformerFactory.getStoreName())
+        .flatTransform(new ChildParentsTransformerSupplier())
         .groupByKey(Grouped.with(Serdes.String(), OptionalParentsSerde.serde()))
         .aggregate(
             () -> Set.<String>of(),
@@ -124,33 +136,27 @@ public class TopologyBuilder {
             Materialized.with(Serdes.String(), ParentsSerde.serde()));
   }
 
-
   private KTable<Windowed<SensorParentKey>, ActivePowerRecord> buildLastValueTable(
       final KTable<String, Set<String>> parentSensorTable,
       final KTable<String, ActivePowerRecord> inputTable) {
-    final JointFlatTransformerFactory jointFlatMapTransformerFactory =
-        new JointFlatTransformerFactory();
-    this.builder.addStateStore(jointFlatMapTransformerFactory.getStoreBuilder());
 
     return inputTable
         .join(parentSensorTable, (record, parents) -> new JointRecordParents(parents, record))
         .toStream()
-        .flatTransform(
-            jointFlatMapTransformerFactory.getTransformerSupplier(),
-            jointFlatMapTransformerFactory.getStoreName())
+        .flatTransform(new JointFlatTransformerSupplier())
         .groupByKey(Grouped.with(
             SensorParentKeySerde.serde(),
-            IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
-        .windowedBy(TimeWindows.of(this.windowSize).grace(this.gracePeriod))
+            this.srAvroSerdeFactory.forValues()))
+        .windowedBy(TimeWindows.of(this.emitPeriod).grace(this.gracePeriod))
         .reduce(
             // TODO Configurable window aggregation function
-            (aggValue, newValue) -> newValue,
-            Materialized.with(SensorParentKeySerde.serde(),
-                IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())));
-
+            (oldVal, newVal) -> newVal.getTimestamp() >= oldVal.getTimestamp() ? newVal : oldVal,
+            Materialized.with(
+                SensorParentKeySerde.serde(),
+                this.srAvroSerdeFactory.forValues()));
   }
 
-  private KStream<String, AggregatedActivePowerRecord> buildAggregationStream(
+  private KTable<Windowed<String>, AggregatedActivePowerRecord> buildAggregationStream(
       final KTable<Windowed<SensorParentKey>, ActivePowerRecord> lastValueTable) {
     return lastValueTable
         .groupBy(
@@ -158,53 +164,44 @@ public class TopologyBuilder {
             Grouped.with(
                 new WindowedSerdes.TimeWindowedSerde<>(
                     Serdes.String(),
-                    this.windowSize.toMillis()),
-                IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
+                    this.emitPeriod.toMillis()),
+                this.srAvroSerdeFactory.forValues()))
         .aggregate(
-            () -> null, this.recordAggregator::add, this.recordAggregator::substract,
+            () -> null,
+            this.recordAggregator::add,
+            this.recordAggregator::substract,
             Materialized.with(
                 new WindowedSerdes.TimeWindowedSerde<>(
                     Serdes.String(),
-                    this.windowSize.toMillis()),
-                IMonitoringRecordSerde.serde(new AggregatedActivePowerRecordFactory())))
-        .suppress(Suppressed.untilTimeLimit(this.windowSize, BufferConfig.unbounded()))
-        // .suppress(Suppressed.untilWindowCloses(BufferConfig.unbounded()))
-        .toStream()
+                    this.emitPeriod.toMillis()),
+                this.srAvroSerdeFactory.forValues()))
-        // TODO timestamp -1 indicates that this record is emitted by an substract event
-        .filter((k, record) -> record.getTimestamp() != -1)
-        .map((k, v) -> KeyValue.pair(k.key(), v)); // TODO compute Timestamp
+        // TODO timestamp -1 indicates that this record is emitted by a substract event
+        .filter((k, record) -> record.getTimestamp() != -1);
   }
 
-  private StatsAccumulator latencyStats = new StatsAccumulator();
-  private long lastTime = System.currentTimeMillis();
+  private void exposeFeedbackStream(
+      final KTable<Windowed<String>, AggregatedActivePowerRecord> aggregations) {
 
-  private void exposeOutputStream(final KStream<String, AggregatedActivePowerRecord> aggregations) {
     aggregations
-        .peek((k, v) -> {
-          final long time = System.currentTimeMillis();
-          final long latency = time - v.getTimestamp();
-          this.latencyStats.add(latency);
-          if (time - this.lastTime >= 1000) {
-            System.out.println("latency,"
-                + time + ','
-                + this.latencyStats.mean() + ','
-                + (this.latencyStats.count() > 0
-                    ? this.latencyStats.populationStandardDeviation()
-                    : Double.NaN)
-                + ','
-                + (this.latencyStats.count() > 1
-                    ? this.latencyStats.sampleStandardDeviation()
-                    : Double.NaN)
-                + ','
-                + this.latencyStats.min() + ','
-                + this.latencyStats.max() + ','
-                + this.latencyStats.count());
-            this.latencyStats = new StatsAccumulator();
-            this.lastTime = time;
-          }
-        })
+        .toStream()
+        .filter((k, record) -> record != null)
+        .selectKey((k, v) -> k.key())
+        .to(this.feedbackTopic, Produced.with(
+            Serdes.String(),
+            this.srAvroSerdeFactory.forValues()));
+  }
+
+  private void exposeOutputStream(
+      final KTable<Windowed<String>, AggregatedActivePowerRecord> aggregations) {
+
+    aggregations
+        // .suppress(Suppressed.untilWindowCloses(BufferConfig.unbounded()))
+        .suppress(Suppressed.untilTimeLimit(this.emitPeriod, BufferConfig.unbounded()))
+        .toStream()
+        .filter((k, record) -> record != null)
+        .selectKey((k, v) -> k.key())
         .to(this.outputTopic, Produced.with(
             Serdes.String(),
-            IMonitoringRecordSerde.serde(new AggregatedActivePowerRecordFactory())));
+            this.srAvroSerdeFactory.forValues()));
   }
 }
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
index ce7d5e90b476a9d8b8508ea2356f4a2da1d856f3..16addb8510eec2254d4787edbfbfbe186996fdea 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
@@ -4,19 +4,21 @@ import java.time.Duration;
 import java.util.Objects;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
 
 /**
  * Builder for the Kafka Streams configuration.
  */
 public class Uc2KafkaStreamsBuilder extends KafkaStreamsBuilder { // NOPMD builder method
 
-  private static final Duration WINDOW_SIZE_DEFAULT = Duration.ofSeconds(1);
+  private static final Duration EMIT_PERIOD_DEFAULT = Duration.ofSeconds(1);
   private static final Duration GRACE_PERIOD_DEFAULT = Duration.ZERO;
 
   private String inputTopic; // NOPMD
+  private String feedbackTopic; // NOPMD
   private String outputTopic; // NOPMD
   private String configurationTopic; // NOPMD
-  private Duration windowSize; // NOPMD
+  private Duration emitPeriod; // NOPMD
   private Duration gracePeriod; // NOPMD
 
   public Uc2KafkaStreamsBuilder inputTopic(final String inputTopic) {
@@ -24,6 +26,11 @@ public class Uc2KafkaStreamsBuilder extends KafkaStreamsBuilder { // NOPMD build
     return this;
   }
 
+  public Uc2KafkaStreamsBuilder feedbackTopic(final String feedbackTopic) {
+    this.feedbackTopic = feedbackTopic;
+    return this;
+  }
+
   public Uc2KafkaStreamsBuilder outputTopic(final String outputTopic) {
     this.outputTopic = outputTopic;
     return this;
@@ -34,8 +41,8 @@ public class Uc2KafkaStreamsBuilder extends KafkaStreamsBuilder { // NOPMD build
     return this;
   }
 
-  public Uc2KafkaStreamsBuilder windowSize(final Duration windowSize) {
-    this.windowSize = Objects.requireNonNull(windowSize);
+  public Uc2KafkaStreamsBuilder emitPeriod(final Duration emitPeriod) {
+    this.emitPeriod = Objects.requireNonNull(emitPeriod);
     return this;
   }
 
@@ -47,15 +54,18 @@ public class Uc2KafkaStreamsBuilder extends KafkaStreamsBuilder { // NOPMD build
   @Override
   protected Topology buildTopology() {
     Objects.requireNonNull(this.inputTopic, "Input topic has not been set.");
+    Objects.requireNonNull(this.feedbackTopic, "Feedback topic has not been set.");
     Objects.requireNonNull(this.outputTopic, "Output topic has not been set.");
     Objects.requireNonNull(this.configurationTopic, "Configuration topic has not been set.");
 
     final TopologyBuilder topologyBuilder = new TopologyBuilder(
         this.inputTopic,
         this.outputTopic,
+        this.feedbackTopic,
         this.configurationTopic,
-        this.windowSize == null ? WINDOW_SIZE_DEFAULT : this.windowSize,
-        this.gracePeriod == null ? GRACE_PERIOD_DEFAULT : this.gracePeriod);
+        this.emitPeriod == null ? EMIT_PERIOD_DEFAULT : this.emitPeriod,
+        this.gracePeriod == null ? GRACE_PERIOD_DEFAULT : this.gracePeriod,
+        new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl));
 
     return topologyBuilder.build();
   }
diff --git a/uc2-application/src/main/resources/META-INF/application.properties b/uc2-application/src/main/resources/META-INF/application.properties
index f9a5225680f638239e637e99bf8d65152d15764d..10c47960adb012ba5c572e3833a37d821189eb8e 100644
--- a/uc2-application/src/main/resources/META-INF/application.properties
+++ b/uc2-application/src/main/resources/META-INF/application.properties
@@ -1,15 +1,17 @@
 application.name=theodolite-uc2-application
 application.version=0.0.1
 
-configuration.host=localhost
-configuration.port=8082
-configuration.kafka.topic=configuration
-
 kafka.bootstrap.servers=localhost:9092
 kafka.input.topic=input
+kafka.configuration.topic=configuration
+kafka.feedback.topic=aggregation-feedback
 kafka.output.topic=output
-window.size.ms=1000
-window.grace.ms=0
+
+schema.registry.url=http://localhost:8091
+
+emit.period.ms=5000
+grace.period.ms=0
+
 num.threads=1
 commit.interval.ms=100
 cache.max.bytes.buffering=-1
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java b/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
index 49ed674bc4442f01de1cf51e4510f2079524933d..54e8c460e642d53bb013ef6888570d6fc36ff614 100644
--- a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
+++ b/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
@@ -3,7 +3,6 @@ package theodolite.uc2.streamprocessing;
 import java.util.Optional;
 import java.util.Set;
 import org.junit.Test;
-import theodolite.uc2.streamprocessing.OptionalParentsSerde;
 
 public class OptionalParentsSerdeTest {
 
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java b/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
index 15872798698ceffcdbaddb689d4179afd7d67a01..f12604d6a19ca36e9c151210005c910b37908307 100644
--- a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
+++ b/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
@@ -2,7 +2,6 @@ package theodolite.uc2.streamprocessing;
 
 import java.util.Set;
 import org.junit.Test;
-import theodolite.uc2.streamprocessing.ParentsSerde;
 
 public class ParentsSerdeTest {
 
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java b/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
index 7d9fe3a6eb83b82d85913f212fe9a930f194b220..7ca99bcb79baeb5f95a8270b99a559f2f108867e 100644
--- a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
+++ b/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
@@ -1,8 +1,6 @@
 package theodolite.uc2.streamprocessing;
 
 import org.junit.Test;
-import theodolite.uc2.streamprocessing.SensorParentKey;
-import theodolite.uc2.streamprocessing.SensorParentKeySerde;
 
 public class SensorParentKeySerdeTest {
 
diff --git a/uc2-workload-generator/build.gradle b/uc2-workload-generator/build.gradle
index f2c3e5d2e73b655dffd94222ecfbc4fc31b7f722..b92e0c2edc54786ea957338b9981922f0a6a7b32 100644
--- a/uc2-workload-generator/build.gradle
+++ b/uc2-workload-generator/build.gradle
@@ -1 +1 @@
-mainClassName = "theodolite.uc2.workloadgenerator.LoadGenerator"
+mainClassName = "theodolite.uc2.workloadgenerator.LoadGenerator"
diff --git a/uc2-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java b/uc2-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
deleted file mode 100644
index bf562d86ac913138f48da79c4542d9583b1c8390..0000000000000000000000000000000000000000
--- a/uc2-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package theodolite.kafkasender;
-
-import java.util.Properties;
-import java.util.function.Function;
-import kieker.common.record.IMonitoringRecord;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-
-
-/**
- * Sends monitoring records to Kafka.
- *
- * @param <T> {@link IMonitoringRecord} to send
- */
-public class KafkaRecordSender<T extends IMonitoringRecord> {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(KafkaRecordSender.class);
-
-  private final String topic;
-
-  private final Function<T, String> keyAccessor;
-
-  private final Function<T, Long> timestampAccessor;
-
-  private final Producer<String, T> producer;
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic) {
-    this(bootstrapServers, topic, x -> "", x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor) {
-    this(bootstrapServers, topic, keyAccessor, x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor) {
-    this(bootstrapServers, topic, keyAccessor, timestampAccessor, new Properties());
-  }
-
-  /**
-   * Create a new {@link KafkaRecordSender}.
-   */
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor,
-      final Properties defaultProperties) {
-    this.topic = topic;
-    this.keyAccessor = keyAccessor;
-    this.timestampAccessor = timestampAccessor;
-
-    final Properties properties = new Properties();
-    properties.putAll(defaultProperties);
-    properties.put("bootstrap.servers", bootstrapServers);
-    // properties.put("acks", this.acknowledges);
-    // properties.put("batch.size", this.batchSize);
-    // properties.put("linger.ms", this.lingerMs);
-    // properties.put("buffer.memory", this.bufferMemory);
-
-    this.producer = new KafkaProducer<>(properties, new StringSerializer(),
-        IMonitoringRecordSerde.serializer());
-  }
-
-  /**
-   * Write the passed monitoring record to Kafka.
-   */
-  public void write(final T monitoringRecord) {
-    final ProducerRecord<String, T> record =
-        new ProducerRecord<>(this.topic, null, this.timestampAccessor.apply(monitoringRecord),
-            this.keyAccessor.apply(monitoringRecord), monitoringRecord);
-
-    LOGGER.debug("Send record to Kafka topic {}: {}", this.topic, record);
-    this.producer.send(record);
-  }
-
-  public void terminate() {
-    this.producer.close();
-  }
-
-}
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java b/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
index c8b3a1846254603c8690bf395c24c6d6f9fb2166..ad24e8e4bc8f86b7ed4d5dc2822622f8da22d6d1 100644
--- a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
+++ b/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
@@ -10,8 +10,14 @@ import org.apache.kafka.common.serialization.StringSerializer;
 import titan.ccp.configuration.events.Event;
 import titan.ccp.configuration.events.EventSerde;
 
+/**
+ * Publishes configurations, such as sensor hierarchies, as {@link Event}s to a
+ * Kafka topic.
+ */
 public class ConfigPublisher {
 
+  private static final String MEMORY_CONFIG = "134217728"; // 128 MB
+
   private final String topic;
 
   private final Producer<Event, String> producer;
@@ -20,6 +26,13 @@ public class ConfigPublisher {
     this(bootstrapServers, topic, new Properties());
   }
 
+  /**
+   * Creates a new {@link ConfigPublisher} object.
+   *
+   * @param bootstrapServers The Kafka bootstrap servers to connect to.
+   * @param topic The topic to write configurations to.
+   * @param defaultProperties Additional properties for the Kafka producer.
+   */
   public ConfigPublisher(final String bootstrapServers, final String topic,
       final Properties defaultProperties) {
     this.topic = topic;
@@ -27,13 +40,19 @@ public class ConfigPublisher {
     final Properties properties = new Properties();
     properties.putAll(defaultProperties);
     properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-    properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "134217728"); // 128 MB
-    properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "134217728"); // 128 MB
+    properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, MEMORY_CONFIG);
+    properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, MEMORY_CONFIG);
 
     this.producer =
         new KafkaProducer<>(properties, EventSerde.serializer(), new StringSerializer());
   }
 
+  /**
+   * Publishes an event with the given value to the Kafka topic.
+   *
+   * @param event The {@link Event} that occurred.
+   * @param value The configuration value to publish.
+   */
   public void publish(final Event event, final String value) {
     final ProducerRecord<Event, String> record = new ProducerRecord<>(this.topic, event, value);
     try {
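A hedged usage sketch for the publisher (broker address and topic are placeholders; building a registry via MutableSensorRegistry and serializing it with toJson() are assumptions about the Titan model API):

    import theodolite.uc2.workloadgenerator.ConfigPublisher;
    import titan.ccp.configuration.events.Event;
    import titan.ccp.model.sensorregistry.MutableSensorRegistry;

    public final class PublishSketch {

      private PublishSketch() {}

      public static void main(final String[] args) {
        // Publish a minimal sensor registry as a SENSOR_REGISTRY_CHANGED event.
        final MutableSensorRegistry registry = new MutableSensorRegistry("root");
        final ConfigPublisher publisher =
            new ConfigPublisher("localhost:9092", "configuration");
        publisher.publish(Event.SENSOR_REGISTRY_CHANGED, registry.toJson());
      }
    }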
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java b/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
index 823f4f2761cc3c409451c67b7302e3d2f17adbb9..3eb3e8d25b1f1aa6f302673727b8457a744fb503 100644
--- a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
+++ b/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
@@ -1,121 +1,139 @@
 package theodolite.uc2.workloadgenerator;
 
 import java.io.IOException;
-import java.util.List;
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
 import java.util.Objects;
 import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import theodolite.kafkasender.KafkaRecordSender;
+import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGenerator;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGeneratorBuilder;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
 import titan.ccp.configuration.events.Event;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
+import titan.ccp.model.records.ActivePowerRecord;
+import titan.ccp.model.sensorregistry.SensorRegistry;
 
-public class LoadGenerator {
+/**
+ * The {@code LoadGenerator} generates a load of power records and sends them to Kafka.
+ */
+public final class LoadGenerator {
+
+  private static final int SLEEP_PERIOD = 30_000;
 
   private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class);
 
+  // Constants
+  private static final String DEEP = "deep";
+  private static final long MAX_DURATION_IN_DAYS = 30L;
+
+  // Make this a utility class, because all methods are static.
+  private LoadGenerator() {
+    throw new UnsupportedOperationException();
+  }
+
+  /**
+   * Main method.
+   *
+   * @param args CLI arguments
+   * @throws InterruptedException if the thread is interrupted while waiting
+   * @throws IOException if an I/O error occurs
+   */
   public static void main(final String[] args) throws InterruptedException, IOException {
+    // uc2
     LOGGER.info("Start workload generator for use case UC2.");
 
-    final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
+    // get environment variables
+    final String hierarchy = System.getenv("HIERARCHY");
+    if (hierarchy != null && hierarchy.equals(DEEP)) {
+      LOGGER.error(
+          "The HIERARCHY parameter is no longer supported. Creating a full hierachy instead.");
+    }
     final int numNestedGroups = Integer
         .parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
-    final int numSensor =
+    final String zooKeeperHost = Objects.requireNonNullElse(System.getenv("ZK_HOST"), "localhost");
+    final int zooKeeperPort =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("ZK_PORT"), "2181"));
+    final int numSensors =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
     final int periodMs =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-    final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+    final double value =
+        Double.parseDouble(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
     final boolean sendRegistry = Boolean
         .parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
     final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
     final String kafkaBootstrapServers =
         Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
             "localhost:9092");
+    final String schemaRegistryUrl =
+        Objects.requireNonNullElse(System.getenv("SCHEMA_REGISTRY_URL"), "http://localhost:8091");
     final String kafkaInputTopic =
         Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
     final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
     final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
     final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+    final int instances =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("INSTANCES"), "1"));
 
-    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-    if (hierarchy.equals("deep")) {
-      MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-      for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-        lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-      }
-      for (int s = 0; s < numSensor; s++) {
-        lastSensor.addChildMachineSensor("sensor_" + s);
-      }
-    } else if (hierarchy.equals("full")) {
-      addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-    } else {
-      throw new IllegalStateException();
-    }
-
-    final List<String> sensors =
-        sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-            .collect(Collectors.toList());
-
-    if (sendRegistry) {
-      final ConfigPublisher configPublisher =
-          new ConfigPublisher(kafkaBootstrapServers, "configuration");
-      configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-      configPublisher.close();
-      System.out.println("Configuration sent.");
-
-      System.out.println("Now wait 30 seconds");
-      Thread.sleep(30_000);
-      System.out.println("And woke up again :)");
-    }
+    // build sensor registry
+    final SensorRegistry sensorRegistry =
+        new SensorRegistryBuilder(numNestedGroups, numSensors).build();
 
+    // create kafka record sender
     final Properties kafkaProperties = new Properties();
     // kafkaProperties.put("acks", this.acknowledges);
     kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
     kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
     kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender =
-        new KafkaRecordSender<>(kafkaBootstrapServers,
-            kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-    final Random random = new Random();
-
-    for (final String sensor : sensors) {
-      final int initialDelay = random.nextInt(periodMs);
-      executor.scheduleAtFixedRate(() -> {
-        kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
-      }, initialDelay, periodMs, TimeUnit.MILLISECONDS);
-    }
-
-    System.out.println("Wait for termination...");
-    executor.awaitTermination(30, TimeUnit.DAYS);
-    System.out.println("Will terminate now");
 
-  }
-
-  private static int addChildren(final MutableAggregatedSensor parent, final int numChildren,
-      final int lvl,
-      final int maxLvl, int nextId) {
-    for (int c = 0; c < numChildren; c++) {
-      if (lvl == maxLvl) {
-        parent.addChildMachineSensor("s_" + nextId);
-        nextId++;
-      } else {
-        final MutableAggregatedSensor newParent =
-            parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
-        nextId++;
-        nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
-      }
-    }
-    return nextId;
+    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender =
+        new KafkaRecordSender.Builder<ActivePowerRecord>(
+            kafkaBootstrapServers,
+            kafkaInputTopic,
+            schemaRegistryUrl)
+                .keyAccessor(r -> r.getIdentifier())
+                .timestampAccessor(r -> r.getTimestamp())
+                .defaultProperties(kafkaProperties)
+                .build();
+
+    // create workload generator
+    final KafkaWorkloadGenerator<ActivePowerRecord> workloadGenerator =
+        KafkaWorkloadGeneratorBuilder.<ActivePowerRecord>builder()
+            .instances(instances)
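+            // the key space corresponds to the identifiers ("s_0", "s_1", ...) that the
+            // SensorRegistryBuilder assigns to its machine sensors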
+            .keySpace(new KeySpace("s_", sensorRegistry.getMachineSensors().size()))
+            .threads(threads)
+            .period(Duration.of(periodMs, ChronoUnit.MILLIS))
+            .duration(Duration.of(MAX_DURATION_IN_DAYS, ChronoUnit.DAYS))
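+            // publish the sensor registry once before the load generation starts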
+            .beforeAction(() -> {
+              if (sendRegistry) {
+                final ConfigPublisher configPublisher =
+                    new ConfigPublisher(kafkaBootstrapServers, "configuration");
+                configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
+                configPublisher.close();
+                LOGGER.info("Configuration sent.");
+
+                LOGGER.info("Now wait 30 seconds");
+                try {
+                  Thread.sleep(SLEEP_PERIOD);
+                } catch (final InterruptedException e) {
+                  // interrupted while waiting; log the error and continue
+                  LOGGER.error(e.getMessage(), e);
+                }
+                LOGGER.info("And woke up again :)");
+              }
+            })
+            .generatorFunction(
+                sensor -> new ActivePowerRecord(sensor, System.currentTimeMillis(), value))
+            .zooKeeper(new ZooKeeper(zooKeeperHost, zooKeeperPort))
+            .kafkaRecordSender(kafkaRecordSender)
+            .build();
+
+    // start
+    workloadGenerator.start();
   }
 
 }
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGeneratorExtrem.java b/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGeneratorExtrem.java
deleted file mode 100644
index 1e58541758602cd2b1ea84f3ac3360aa3911425d..0000000000000000000000000000000000000000
--- a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGeneratorExtrem.java
+++ /dev/null
@@ -1,165 +0,0 @@
-package theodolite.uc2.workloadgenerator;
-
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadMXBean;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import theodolite.kafkasender.KafkaRecordSender;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.model.sensorregistry.SensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-
-public class LoadGeneratorExtrem {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
-    final int numNestedGroups =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
-    final int numSensor =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
-    final int value =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-    final boolean sendRegistry =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-    final boolean doNothing =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("DO_NOTHING"), "false"));
-    final int threads =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-    final int producers =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("PRODUCERS"), "1"));
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
-    final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
-    final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
-
-    final SensorRegistry sensorRegistry =
-        buildSensorRegistry(hierarchy, numNestedGroups, numSensor);
-
-    if (sendRegistry) {
-      final ConfigPublisher configPublisher =
-          new ConfigPublisher(kafkaBootstrapServers, "configuration");
-      configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-      configPublisher.close();
-      System.out.println("Configuration sent.");
-
-      System.out.println("Now wait 30 seconds");
-      Thread.sleep(30_000);
-      System.out.println("And woke up again :)");
-    }
-
-    final Properties kafkaProperties = new Properties();
-    // kafkaProperties.put("acks", this.acknowledges);
-    kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
-    kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
-    kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final List<KafkaRecordSender<ActivePowerRecord>> kafkaRecordSenders = Stream
-        .<KafkaRecordSender<ActivePowerRecord>>generate(
-            () -> new KafkaRecordSender<>(
-                kafkaBootstrapServers,
-                kafkaInputTopic,
-                r -> r.getIdentifier(),
-                r -> r.getTimestamp(),
-                kafkaProperties))
-        .limit(producers)
-        .collect(Collectors.toList());
-
-    final List<String> sensors =
-        sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-            .collect(Collectors.toList());
-
-    for (int i = 0; i < threads; i++) {
-      final int threadId = i;
-      new Thread(() -> {
-        while (true) {
-          for (final String sensor : sensors) {
-            if (!doNothing) {
-              kafkaRecordSenders.get(threadId % producers).write(new ActivePowerRecord(
-                  sensor,
-                  System.currentTimeMillis(),
-                  value));
-            }
-          }
-        }
-      }).start();
-    }
-
-    while (true) {
-      printCpuUsagePerThread();
-    }
-
-    // System.out.println("Wait for termination...");
-    // Thread.sleep(30 * 24 * 60 * 60 * 1000L);
-    // System.out.println("Will terminate now");
-  }
-
-  private static void printCpuUsagePerThread() throws InterruptedException {
-    final ThreadMXBean tmxb = ManagementFactory.getThreadMXBean();
-    final List<Thread> threads = new ArrayList<>(Thread.getAllStackTraces().keySet());
-
-    final long start = System.nanoTime();
-    final long[] startCpuTimes = new long[threads.size()];
-    for (int i = 0; i < threads.size(); i++) {
-      final Thread thread = threads.get(i);
-      startCpuTimes[i] = tmxb.getThreadCpuTime(thread.getId());
-    }
-
-    Thread.sleep(5000);
-
-    for (int i = 0; i < threads.size(); i++) {
-      final Thread thread = threads.get(i);
-      final long cpuTime = tmxb.getThreadCpuTime(thread.getId()) - startCpuTimes[i];
-      final long dur = System.nanoTime() - start;
-      final double util = (double) cpuTime / dur;
-      System.out.println(
-          "Thread " + thread.getName() + ": " + String.format(java.util.Locale.US, "%.4f", util));
-    }
-  }
-
-  private static SensorRegistry buildSensorRegistry(final String hierarchy,
-      final int numNestedGroups, final int numSensor) {
-    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-    if (hierarchy.equals("deep")) {
-      MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-      for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-        lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-      }
-      for (int s = 0; s < numSensor; s++) {
-        lastSensor.addChildMachineSensor("sensor_" + s);
-      }
-    } else if (hierarchy.equals("full")) {
-      addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-    } else {
-      throw new IllegalStateException();
-    }
-    return sensorRegistry;
-  }
-
-  private static int addChildren(final MutableAggregatedSensor parent, final int numChildren,
-      final int lvl, final int maxLvl, int nextId) {
-    for (int c = 0; c < numChildren; c++) {
-      if (lvl == maxLvl) {
-        parent.addChildMachineSensor("s_" + nextId);
-        nextId++;
-      } else {
-        final MutableAggregatedSensor newParent =
-            parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
-        nextId++;
-        nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
-      }
-    }
-    return nextId;
-  }
-
-}
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java b/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..7c34ac89471386f4ddd508a304f2197602beab27
--- /dev/null
+++ b/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java
@@ -0,0 +1,51 @@
+package theodolite.uc2.workloadgenerator;
+
+import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
+import titan.ccp.model.sensorregistry.MutableSensorRegistry;
+import titan.ccp.model.sensorregistry.SensorRegistry;
+
+/**
+ * Builder for creating a nested {@link SensorRegistry} with {@code numNestedGroups} levels and
+ * {@code numSensors} children per group.
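+ *
+ * <p>
+ * For example, {@code new SensorRegistryBuilder(2, 2).build()} creates a registry with two
+ * aggregated groups that each contain two machine sensors, named {@code s_0} to {@code s_3}.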
+ */
+public final class SensorRegistryBuilder {
+
+  private final int numNestedGroups;
+  private final int numSensors;
+
+  public SensorRegistryBuilder(final int numNestedGroups, final int numSensors) {
+    this.numNestedGroups = numNestedGroups;
+    this.numSensors = numSensors;
+  }
+
+  /**
+   * Creates the {@link SensorRegistry}.
+   */
+  public SensorRegistry build() {
+    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
+    this.addChildren(
+        sensorRegistry.getTopLevelSensor(),
+        this.numSensors,
+        1,
+        this.numNestedGroups,
+        0);
+    return sensorRegistry;
+  }
+
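+  /**
+   * Recursively adds children to the given parent sensor: machine sensors ({@code s_<id>}) at
+   * the deepest level and aggregated group sensors ({@code g_<lvl>_<id>}) on the levels above.
+   *
+   * @return the next free sensor id, which keeps machine sensor ids consecutive across sibling
+   *         groups.
+   */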
+  private int addChildren(final MutableAggregatedSensor parent, final int numChildren,
+      final int lvl, final int maxLvl, final int startId) {
+    int nextId = startId;
+    for (int c = 0; c < numChildren; c++) {
+      if (lvl == maxLvl) {
+        parent.addChildMachineSensor("s_" + nextId);
+        nextId++;
+      } else {
+        final MutableAggregatedSensor newParent =
+            parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
+        nextId = this.addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
+      }
+    }
+    return nextId;
+  }
+
+}
diff --git a/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java b/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..17b208edac4acafa92b7a75e053e2fe97a9afdb6
--- /dev/null
+++ b/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java
@@ -0,0 +1,46 @@
+package theodolite.uc2.workloadgenerator;
+
+import java.util.Collection;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.junit.Assert;
+import org.junit.Test;
+import titan.ccp.model.sensorregistry.AggregatedSensor;
+import titan.ccp.model.sensorregistry.MachineSensor;
+import titan.ccp.model.sensorregistry.Sensor;
+import titan.ccp.model.sensorregistry.SensorRegistry;
+
+public class SensorRegistryBuilderTest {
+
+  @Test
+  public void testStructure() {
+    final SensorRegistry registry = new SensorRegistryBuilder(2, 2).build();
+    final AggregatedSensor root = registry.getTopLevelSensor();
+    final Collection<Sensor> firstLevelSensors = root.getChildren();
+    Assert.assertEquals(2, firstLevelSensors.size());
+    for (final Sensor sensor : firstLevelSensors) {
+      Assert.assertTrue(sensor instanceof AggregatedSensor);
+      final AggregatedSensor aggregatedSensor = (AggregatedSensor) sensor;
+      final Collection<Sensor> secondLevelSensors = aggregatedSensor.getChildren();
+      Assert.assertEquals(2, secondLevelSensors.size());
+      for (final Sensor machineSensor : secondLevelSensors) {
+        Assert.assertTrue(machineSensor instanceof MachineSensor);
+      }
+    }
+  }
+
+  @Test
+  public void testMachineSensorNaming() {
+    final SensorRegistry registry = new SensorRegistryBuilder(2, 2).build();
+    final Set<String> machineSensors = registry.getMachineSensors().stream()
+        .map(s -> s.getIdentifier()).collect(Collectors.toSet());
+
+    Assert.assertTrue(machineSensors.contains("s_0"));
+    Assert.assertTrue(machineSensors.contains("s_1"));
+    Assert.assertTrue(machineSensors.contains("s_2"));
+    Assert.assertTrue(machineSensors.contains("s_3"));
+  }
+
+}
diff --git a/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java b/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
index 18aae8c3499643c29901c3ca7461ec707d59c280..b245b1645c9e5ee68df3f108802c9b91d70cf017 100644
--- a/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
+++ b/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
@@ -5,8 +5,9 @@ import java.util.Objects;
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
+import theodolite.commons.kafkastreams.ConfigurationKeys;
 import theodolite.uc3.streamprocessing.Uc3KafkaStreamsBuilder;
-import titan.ccp.common.configuration.Configurations;
+import titan.ccp.common.configuration.ServiceConfigurations;
 
 /**
  * A microservice that manages the history and, therefore, stores and aggregates incoming
@@ -15,7 +16,7 @@ import titan.ccp.common.configuration.Configurations;
  */
 public class HistoryService {
 
-  private final Configuration config = Configurations.create();
+  private final Configuration config = ServiceConfigurations.createWithDefaults();
 
   private final CompletableFuture<Void> stopEvent = new CompletableFuture<>();
   private final int windowDurationMinutes = Integer
@@ -45,6 +46,7 @@ public class HistoryService {
         .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
         .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
         .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
+        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
         .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
         .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
         .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
diff --git a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java b/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
index 0ad1845f656bcbd11b61c0e0affa9b6bcfabd2f7..74eed74c52a78df229c02542bc6e66d7f796c2c7 100644
--- a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
+++ b/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
@@ -14,8 +14,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import theodolite.uc3.streamprocessing.util.StatsFactory;
 import titan.ccp.common.kafka.GenericSerde;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-import titan.ccp.models.records.ActivePowerRecordFactory;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
+import titan.ccp.model.records.ActivePowerRecord;
 
 /**
  * Builds Kafka Stream Topology for the History microservice.
@@ -26,6 +26,7 @@ public class TopologyBuilder {
 
   private final String inputTopic;
   private final String outputTopic;
+  private final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory;
   private final Duration duration;
 
   private final StreamsBuilder builder = new StreamsBuilder();
@@ -34,9 +35,11 @@ public class TopologyBuilder {
    * Create a new {@link TopologyBuilder} using the given topics.
    */
   public TopologyBuilder(final String inputTopic, final String outputTopic,
+      final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory,
       final Duration duration) {
     this.inputTopic = inputTopic;
     this.outputTopic = outputTopic;
+    this.srAvroSerdeFactory = srAvroSerdeFactory;
     this.duration = duration;
   }
 
@@ -47,7 +50,7 @@ public class TopologyBuilder {
     this.builder
         .stream(this.inputTopic,
             Consumed.with(Serdes.String(),
-                IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
+                this.srAvroSerdeFactory.<ActivePowerRecord>forValues()))
         .groupByKey()
         .windowedBy(TimeWindows.of(this.duration))
         // .aggregate(
@@ -62,7 +65,7 @@ public class TopologyBuilder {
                 GenericSerde.from(Stats::toByteArray, Stats::fromByteArray)))
         .toStream()
         .map((k, s) -> KeyValue.pair(k.key(), s.toString()))
-        .peek((k, v) -> System.out.println(k + ": " + v))
+        .peek((k, v) -> LOGGER.info("{}: {}", k, v))
         .to(this.outputTopic, Produced.with(Serdes.String(), Serdes.String()));
 
     return this.builder.build();
diff --git a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java b/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
index 63841361b06bb054fee203a894fba0c11c249d16..e74adf7c87673cc0e6ea4004dbcb1c0a6fc907ac 100644
--- a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
+++ b/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
@@ -4,6 +4,7 @@ import java.time.Duration;
 import java.util.Objects;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
 
 /**
  * Builder for the Kafka Streams configuration.
@@ -36,7 +37,7 @@ public class Uc3KafkaStreamsBuilder extends KafkaStreamsBuilder {
     Objects.requireNonNull(this.windowDuration, "Window duration has not been set.");
 
     final TopologyBuilder topologyBuilder = new TopologyBuilder(this.inputTopic, this.outputTopic,
-        this.windowDuration);
+        new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl), this.windowDuration);
     return topologyBuilder.build();
   }
 
diff --git a/uc3-application/src/main/resources/META-INF/application.properties b/uc3-application/src/main/resources/META-INF/application.properties
index 96e2d8b6ff46f3b3ce878b1fec011e9315e118bc..2ceaf37224b0bff54b09beaabe29210216e11671 100644
--- a/uc3-application/src/main/resources/META-INF/application.properties
+++ b/uc3-application/src/main/resources/META-INF/application.properties
@@ -4,6 +4,10 @@ application.version=0.0.1
 kafka.bootstrap.servers=localhost:9092
 kafka.input.topic=input
 kafka.output.topic=output
+kafka.window.duration.minutes=1
+
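+# URL of the schema registry used for Avro (de-)serialization of records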
+schema.registry.url=http://localhost:8091
+
 num.threads=1
 commit.interval.ms=100
 cache.max.bytes.buffering=-1
diff --git a/uc3-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java b/uc3-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
deleted file mode 100644
index bf562d86ac913138f48da79c4542d9583b1c8390..0000000000000000000000000000000000000000
--- a/uc3-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package theodolite.kafkasender;
-
-import java.util.Properties;
-import java.util.function.Function;
-import kieker.common.record.IMonitoringRecord;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-
-
-/**
- * Sends monitoring records to Kafka.
- *
- * @param <T> {@link IMonitoringRecord} to send
- */
-public class KafkaRecordSender<T extends IMonitoringRecord> {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(KafkaRecordSender.class);
-
-  private final String topic;
-
-  private final Function<T, String> keyAccessor;
-
-  private final Function<T, Long> timestampAccessor;
-
-  private final Producer<String, T> producer;
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic) {
-    this(bootstrapServers, topic, x -> "", x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor) {
-    this(bootstrapServers, topic, keyAccessor, x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor) {
-    this(bootstrapServers, topic, keyAccessor, timestampAccessor, new Properties());
-  }
-
-  /**
-   * Create a new {@link KafkaRecordSender}.
-   */
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor,
-      final Properties defaultProperties) {
-    this.topic = topic;
-    this.keyAccessor = keyAccessor;
-    this.timestampAccessor = timestampAccessor;
-
-    final Properties properties = new Properties();
-    properties.putAll(defaultProperties);
-    properties.put("bootstrap.servers", bootstrapServers);
-    // properties.put("acks", this.acknowledges);
-    // properties.put("batch.size", this.batchSize);
-    // properties.put("linger.ms", this.lingerMs);
-    // properties.put("buffer.memory", this.bufferMemory);
-
-    this.producer = new KafkaProducer<>(properties, new StringSerializer(),
-        IMonitoringRecordSerde.serializer());
-  }
-
-  /**
-   * Write the passed monitoring record to Kafka.
-   */
-  public void write(final T monitoringRecord) {
-    final ProducerRecord<String, T> record =
-        new ProducerRecord<>(this.topic, null, this.timestampAccessor.apply(monitoringRecord),
-            this.keyAccessor.apply(monitoringRecord), monitoringRecord);
-
-    LOGGER.debug("Send record to Kafka topic {}: {}", this.topic, record);
-    this.producer.send(record);
-  }
-
-  public void terminate() {
-    this.producer.close();
-  }
-
-}
diff --git a/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java b/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
index a063ea359571d67fe118ec2f0951664e62624d98..85f6a94036c53b48973ba2200212fc8e5dfd663d 100644
--- a/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
+++ b/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
@@ -1,92 +1,102 @@
 package theodolite.uc3.workloadgenerator;
 
 import java.io.IOException;
-import java.util.List;
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
 import java.util.Objects;
 import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import theodolite.kafkasender.KafkaRecordSender;
-import titan.ccp.models.records.ActivePowerRecord;
+import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGenerator;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGeneratorBuilder;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
+import titan.ccp.model.records.ActivePowerRecord;
 
-public class LoadGenerator {
+/**
+ * The {@code LoadGenerator} generates a load of power records and sends them to Kafka.
+ */
+public final class LoadGenerator {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class);
 
-  private static final int WL_MAX_RECORDS = 150_000;
+  // constants
+  private static final long MAX_DURATION_IN_DAYS = 30L;
 
+  // Make this a utility class, because all methods are static.
+  private LoadGenerator() {
+    throw new UnsupportedOperationException();
+  }
+
+  /**
+   * Main method.
+   *
+   * @param args CLI arguments
+   * @throws InterruptedException if the thread is interrupted while waiting
+   * @throws IOException if an I/O error occurs
+   */
   public static void main(final String[] args) throws InterruptedException, IOException {
+    // uc3
     LOGGER.info("Start workload generator for use case UC3.");
 
+    // get environment variables
+    final String zooKeeperHost = Objects.requireNonNullElse(System.getenv("ZK_HOST"), "localhost");
+    final int zooKeeperPort =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("ZK_PORT"), "2181"));
     final int numSensors =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10"));
-    final int instanceId = getInstanceId();
     final int periodMs =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-    final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+    final double value =
+        Double.parseDouble(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
     final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
     final String kafkaBootstrapServers =
         Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
             "localhost:9092");
+    final String schemaRegistryUrl =
+        Objects.requireNonNullElse(System.getenv("SCHEMA_REGISTRY_URL"), "http://localhost:8091");
     final String kafkaInputTopic =
         Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
     final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
     final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
     final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+    final int instances =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("INSTANCES"), "1"));
 
-    final int idStart = instanceId * WL_MAX_RECORDS;
-    final int idEnd = Math.min((instanceId + 1) * WL_MAX_RECORDS, numSensors);
-    LOGGER.info("Generating data for sensors with IDs from {} to {} (exclusive).", idStart, idEnd);
-    final List<String> sensors = IntStream.range(idStart, idEnd)
-        .mapToObj(i -> "s_" + i)
-        .collect(Collectors.toList());
-
+    // create kafka record sender
     final Properties kafkaProperties = new Properties();
     // kafkaProperties.put("acks", this.acknowledges);
     kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
     kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
     kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
     final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender =
-        new KafkaRecordSender<>(kafkaBootstrapServers,
-            kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-    final Random random = new Random();
+        new KafkaRecordSender.Builder<ActivePowerRecord>(
+            kafkaBootstrapServers,
+            kafkaInputTopic,
+            schemaRegistryUrl)
+                .keyAccessor(r -> r.getIdentifier())
+                .timestampAccessor(r -> r.getTimestamp())
+                .defaultProperties(kafkaProperties)
+                .build();
 
-    LOGGER.info("Start setting up sensors.");
-    for (final String sensor : sensors) {
-      final int initialDelay = random.nextInt(periodMs);
-      executor.scheduleAtFixedRate(() -> {
-        kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
-      }, initialDelay, periodMs, TimeUnit.MILLISECONDS);
-    }
-    LOGGER.info("Finished setting up sensors.");
+    // create workload generator
+    final KafkaWorkloadGenerator<ActivePowerRecord> workloadGenerator =
+        KafkaWorkloadGeneratorBuilder.<ActivePowerRecord>builder()
+            .instances(instances)
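+            // INSTANCES and the ZooKeeper connection below are presumably used to coordinate
+            // how the key space ("s_0" to "s_<NUM_SENSORS-1>") is split among generator instances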
+            .keySpace(new KeySpace("s_", numSensors))
+            .threads(threads)
+            .period(Duration.of(periodMs, ChronoUnit.MILLIS))
+            .duration(Duration.of(MAX_DURATION_IN_DAYS, ChronoUnit.DAYS))
+            .generatorFunction(
+                sensor -> new ActivePowerRecord(sensor, System.currentTimeMillis(), value))
+            .zooKeeper(new ZooKeeper(zooKeeperHost, zooKeeperPort))
+            .kafkaRecordSender(kafkaRecordSender)
+            .build();
 
-    System.out.println("Wait for termination...");
-    executor.awaitTermination(30, TimeUnit.DAYS);
-    System.out.println("Will terminate now");
+    // start
+    workloadGenerator.start();
 
   }
-
-  private static int getInstanceId() {
-    final String podName = System.getenv("POD_NAME");
-    if (podName == null) {
-      return 0;
-    } else {
-      return Pattern.compile("-")
-          .splitAsStream(podName)
-          .reduce((p, x) -> x)
-          .map(Integer::parseInt)
-          .orElse(0);
-    }
-  }
-
 }
diff --git a/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java b/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
index 3e3073fdeed682ae09e345d9f315585e960a3440..23af805733de2bb3f6384fa924a2322490ee58d9 100644
--- a/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
+++ b/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
@@ -4,8 +4,9 @@ import java.time.Duration;
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
+import theodolite.commons.kafkastreams.ConfigurationKeys;
 import theodolite.uc4.streamprocessing.Uc4KafkaStreamsBuilder;
-import titan.ccp.common.configuration.Configurations;
+import titan.ccp.common.configuration.ServiceConfigurations;
 
 /**
  * A microservice that manages the history and, therefore, stores and aggregates incoming
@@ -14,7 +15,7 @@ import titan.ccp.common.configuration.Configurations;
  */
 public class HistoryService {
 
-  private final Configuration config = Configurations.create();
+  private final Configuration config = ServiceConfigurations.createWithDefaults();
 
   private final CompletableFuture<Void> stopEvent = new CompletableFuture<>();
 
@@ -45,6 +46,7 @@ public class HistoryService {
         .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
         .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
         .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
+        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
         .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
         .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
         .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java b/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java
index 214be2dd073e21944ec0765eb30ed72a81b15b1b..97807e3bdecf4000cc2edeed364b8f9d1bc9bb8e 100644
--- a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java
+++ b/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java
@@ -1,5 +1,7 @@
 package theodolite.uc4.streamprocessing;
 
+import java.util.Objects;
+
 /**
  * Composed key of an hour of the day and a sensor id.
  */
@@ -26,4 +28,22 @@ public class HourOfDayKey {
     return this.sensorId + ";" + this.hourOfDay;
   }
 
+  @Override
+  public int hashCode() {
+    return Objects.hash(this.hourOfDay, this.sensorId);
+  }
+
+  @Override
+  public boolean equals(final Object obj) {
+    if (obj == this) {
+      return true;
+    }
+    if (obj instanceof HourOfDayKey) {
+      final HourOfDayKey other = (HourOfDayKey) obj;
+      return Objects.equals(this.hourOfDay, other.hourOfDay)
+          && Objects.equals(this.sensorId, other.sensorId);
+    }
+    return false;
+  }
+
 }
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java b/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
index b4632aaf15ee5f2572c795458f4bfded5c8cfbcd..a92abae6e11c4bf66a5d8d8dee0f10b088e8274b 100644
--- a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
+++ b/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
@@ -17,8 +17,8 @@ import org.apache.kafka.streams.kstream.Produced;
 import org.apache.kafka.streams.kstream.TimeWindows;
 import theodolite.uc4.streamprocessing.util.StatsFactory;
 import titan.ccp.common.kafka.GenericSerde;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-import titan.ccp.models.records.ActivePowerRecordFactory;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
+import titan.ccp.model.records.ActivePowerRecord;
 
 /**
  * Builds Kafka Stream Topology for the History microservice.
@@ -32,6 +32,7 @@ public class TopologyBuilder {
 
   private final String inputTopic;
   private final String outputTopic;
+  private final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory;
   private final Duration aggregtionDuration;
   private final Duration aggregationAdvance;
 
@@ -41,9 +42,11 @@ public class TopologyBuilder {
    * Create a new {@link TopologyBuilder} using the given topics.
    */
   public TopologyBuilder(final String inputTopic, final String outputTopic,
+      final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory,
       final Duration aggregtionDuration, final Duration aggregationAdvance) {
     this.inputTopic = inputTopic;
     this.outputTopic = outputTopic;
+    this.srAvroSerdeFactory = srAvroSerdeFactory;
     this.aggregtionDuration = aggregtionDuration;
     this.aggregationAdvance = aggregationAdvance;
   }
@@ -58,14 +61,14 @@ public class TopologyBuilder {
     this.builder
         .stream(this.inputTopic,
             Consumed.with(Serdes.String(),
-                IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
+                this.srAvroSerdeFactory.<ActivePowerRecord>forValues()))
         .selectKey((key, value) -> {
           final Instant instant = Instant.ofEpochMilli(value.getTimestamp());
           final LocalDateTime dateTime = LocalDateTime.ofInstant(instant, this.zone);
           return keyFactory.createKey(value.getIdentifier(), dateTime);
         })
         .groupByKey(
-            Grouped.with(keySerde, IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
+            Grouped.with(keySerde, this.srAvroSerdeFactory.forValues()))
         .windowedBy(TimeWindows.of(this.aggregtionDuration).advanceBy(this.aggregationAdvance))
         .aggregate(
             () -> Stats.of(),
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java b/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
index 8220f4cd36b0639cd69ac102177a53b1ed90e5b6..7c9e2c4f790cf1fbb7dd34db573576d1e64077db 100644
--- a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
+++ b/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
@@ -4,6 +4,7 @@ import java.time.Duration;
 import java.util.Objects;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
 
 /**
  * Builder for the Kafka Streams configuration.
@@ -45,6 +46,7 @@ public class Uc4KafkaStreamsBuilder extends KafkaStreamsBuilder {
     final TopologyBuilder topologyBuilder = new TopologyBuilder(
         this.inputTopic,
         this.outputTopic,
+        new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl),
         this.aggregtionDuration,
         this.aggregationAdvance);
 
diff --git a/uc4-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java b/uc4-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
deleted file mode 100644
index bf562d86ac913138f48da79c4542d9583b1c8390..0000000000000000000000000000000000000000
--- a/uc4-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package theodolite.kafkasender;
-
-import java.util.Properties;
-import java.util.function.Function;
-import kieker.common.record.IMonitoringRecord;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-
-
-/**
- * Sends monitoring records to Kafka.
- *
- * @param <T> {@link IMonitoringRecord} to send
- */
-public class KafkaRecordSender<T extends IMonitoringRecord> {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(KafkaRecordSender.class);
-
-  private final String topic;
-
-  private final Function<T, String> keyAccessor;
-
-  private final Function<T, Long> timestampAccessor;
-
-  private final Producer<String, T> producer;
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic) {
-    this(bootstrapServers, topic, x -> "", x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor) {
-    this(bootstrapServers, topic, keyAccessor, x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor) {
-    this(bootstrapServers, topic, keyAccessor, timestampAccessor, new Properties());
-  }
-
-  /**
-   * Create a new {@link KafkaRecordSender}.
-   */
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor,
-      final Properties defaultProperties) {
-    this.topic = topic;
-    this.keyAccessor = keyAccessor;
-    this.timestampAccessor = timestampAccessor;
-
-    final Properties properties = new Properties();
-    properties.putAll(defaultProperties);
-    properties.put("bootstrap.servers", bootstrapServers);
-    // properties.put("acks", this.acknowledges);
-    // properties.put("batch.size", this.batchSize);
-    // properties.put("linger.ms", this.lingerMs);
-    // properties.put("buffer.memory", this.bufferMemory);
-
-    this.producer = new KafkaProducer<>(properties, new StringSerializer(),
-        IMonitoringRecordSerde.serializer());
-  }
-
-  /**
-   * Write the passed monitoring record to Kafka.
-   */
-  public void write(final T monitoringRecord) {
-    final ProducerRecord<String, T> record =
-        new ProducerRecord<>(this.topic, null, this.timestampAccessor.apply(monitoringRecord),
-            this.keyAccessor.apply(monitoringRecord), monitoringRecord);
-
-    LOGGER.debug("Send record to Kafka topic {}: {}", this.topic, record);
-    this.producer.send(record);
-  }
-
-  public void terminate() {
-    this.producer.close();
-  }
-
-}
diff --git a/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java b/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
index 90d28aafb86b2b5da050d0110d425b5ec1ffe5e6..ff551e7ef423633137d122dfed7d6e03d362e7ff 100644
--- a/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
+++ b/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
@@ -1,70 +1,103 @@
 package theodolite.uc4.workloadgenerator;
 
 import java.io.IOException;
-import java.util.List;
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
 import java.util.Objects;
 import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import theodolite.kafkasender.KafkaRecordSender;
-import titan.ccp.models.records.ActivePowerRecord;
+import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGenerator;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGeneratorBuilder;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
+import titan.ccp.model.records.ActivePowerRecord;
 
-public class LoadGenerator {
+/**
+ * The {@code LoadGenerator} generates a load of power records and sends them to Kafka.
+ */
+public final class LoadGenerator {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class);
 
+  // constants
+  private static final long MAX_DURATION_IN_DAYS = 30L;
+
+  // Make this a utility class, because all methods are static.
+  private LoadGenerator() {
+    throw new UnsupportedOperationException();
+  }
+
+  /**
+   * Main method.
+   *
+   * @param args CLI arguments
+   * @throws InterruptedException if the thread is interrupted while waiting
+   * @throws IOException if an I/O error occurs
+   */
   public static void main(final String[] args) throws InterruptedException, IOException {
     // uc4
     LOGGER.info("Start workload generator for use case UC4.");
 
-    final int numSensor =
+    // get environment variables
+    final String zooKeeperHost = Objects.requireNonNullElse(System.getenv("ZK_HOST"), "localhost");
+    final int zooKeeperPort =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("ZK_PORT"), "2181"));
+    final int numSensors =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10"));
     final int periodMs =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-    final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+    final double value =
+        Double.parseDouble(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
     final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "1"));
     final String kafkaBootstrapServers =
         Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
             "localhost:9092");
+    final String schemaRegistryUrl =
+        Objects.requireNonNullElse(System.getenv("SCHEMA_REGISTRY_URL"), "http://localhost:8091");
     final String kafkaInputTopic =
         Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
     final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
     final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
     final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+    final int instances =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("INSTANCES"), "1"));
 
+    // create kafka record sender
     final Properties kafkaProperties = new Properties();
     // kafkaProperties.put("acks", this.acknowledges);
     kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
     kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
     kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender =
-        new KafkaRecordSender<>(kafkaBootstrapServers,
-            kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
 
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-    final Random random = new Random();
-
-    final List<String> sensors =
-        IntStream.range(0, numSensor).mapToObj(i -> "s_" + i).collect(Collectors.toList());
-
-    for (final String sensor : sensors) {
-      final int initialDelay = random.nextInt(periodMs);
-      executor.scheduleAtFixedRate(() -> {
-        kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
-      }, initialDelay, periodMs, TimeUnit.MILLISECONDS);
-    }
+    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender =
+        new KafkaRecordSender.Builder<ActivePowerRecord>(
+            kafkaBootstrapServers,
+            kafkaInputTopic,
+            schemaRegistryUrl)
+                .keyAccessor(r -> r.getIdentifier())
+                .timestampAccessor(r -> r.getTimestamp())
+                .defaultProperties(kafkaProperties)
+                .build();
 
-    System.out.println("Wait for termination...");
-    executor.awaitTermination(30, TimeUnit.DAYS);
-    System.out.println("Will terminate now");
+    // create workload generator
+    final KafkaWorkloadGenerator<ActivePowerRecord> workloadGenerator =
+        KafkaWorkloadGeneratorBuilder.<ActivePowerRecord>builder()
+            .instances(instances)
+            .keySpace(new KeySpace("s_", numSensors))
+            .threads(threads)
+            .period(Duration.of(periodMs, ChronoUnit.MILLIS))
+            .duration(Duration.of(MAX_DURATION_IN_DAYS, ChronoUnit.DAYS))
+            .generatorFunction(
+                sensor -> new ActivePowerRecord(sensor, System.currentTimeMillis(), value))
+            .zooKeeper(new ZooKeeper(zooKeeperHost, zooKeeperPort))
+            .kafkaRecordSender(kafkaRecordSender)
+            .build();
 
+    // start
+    workloadGenerator.start();
   }
 
 }
diff --git a/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs b/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..fa98ca63d77bdee891150bd6713f70197a75cefc
--- /dev/null
+++ b/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
@@ -0,0 +1,127 @@
+cleanup.add_default_serial_version_id=true
+cleanup.add_generated_serial_version_id=false
+cleanup.add_missing_annotations=true
+cleanup.add_missing_deprecated_annotations=true
+cleanup.add_missing_methods=false
+cleanup.add_missing_nls_tags=false
+cleanup.add_missing_override_annotations=true
+cleanup.add_missing_override_annotations_interface_methods=true
+cleanup.add_serial_version_id=false
+cleanup.always_use_blocks=true
+cleanup.always_use_parentheses_in_expressions=false
+cleanup.always_use_this_for_non_static_field_access=true
+cleanup.always_use_this_for_non_static_method_access=true
+cleanup.convert_functional_interfaces=false
+cleanup.convert_to_enhanced_for_loop=true
+cleanup.correct_indentation=true
+cleanup.format_source_code=true
+cleanup.format_source_code_changes_only=false
+cleanup.insert_inferred_type_arguments=false
+cleanup.make_local_variable_final=true
+cleanup.make_parameters_final=true
+cleanup.make_private_fields_final=true
+cleanup.make_type_abstract_if_missing_method=false
+cleanup.make_variable_declarations_final=true
+cleanup.never_use_blocks=false
+cleanup.never_use_parentheses_in_expressions=true
+cleanup.organize_imports=true
+cleanup.qualify_static_field_accesses_with_declaring_class=false
+cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+cleanup.qualify_static_member_accesses_with_declaring_class=true
+cleanup.qualify_static_method_accesses_with_declaring_class=false
+cleanup.remove_private_constructors=true
+cleanup.remove_redundant_modifiers=false
+cleanup.remove_redundant_semicolons=true
+cleanup.remove_redundant_type_arguments=true
+cleanup.remove_trailing_whitespaces=true
+cleanup.remove_trailing_whitespaces_all=true
+cleanup.remove_trailing_whitespaces_ignore_empty=false
+cleanup.remove_unnecessary_casts=true
+cleanup.remove_unnecessary_nls_tags=true
+cleanup.remove_unused_imports=true
+cleanup.remove_unused_local_variables=false
+cleanup.remove_unused_private_fields=true
+cleanup.remove_unused_private_members=false
+cleanup.remove_unused_private_methods=true
+cleanup.remove_unused_private_types=true
+cleanup.sort_members=false
+cleanup.sort_members_all=false
+cleanup.use_anonymous_class_creation=false
+cleanup.use_blocks=true
+cleanup.use_blocks_only_for_return_and_throw=false
+cleanup.use_lambda=true
+cleanup.use_parentheses_in_expressions=true
+cleanup.use_this_for_non_static_field_access=true
+cleanup.use_this_for_non_static_field_access_only_if_necessary=false
+cleanup.use_this_for_non_static_method_access=true
+cleanup.use_this_for_non_static_method_access_only_if_necessary=false
+cleanup_profile=_CAU-SE-Style
+cleanup_settings_version=2
+eclipse.preferences.version=1
+editor_save_participant_org.eclipse.jdt.ui.postsavelistener.cleanup=true
+formatter_profile=_CAU-SE-Style
+formatter_settings_version=15
+org.eclipse.jdt.ui.ignorelowercasenames=true
+org.eclipse.jdt.ui.importorder=;
+org.eclipse.jdt.ui.ondemandthreshold=99
+org.eclipse.jdt.ui.staticondemandthreshold=99
+sp_cleanup.add_default_serial_version_id=true
+sp_cleanup.add_generated_serial_version_id=false
+sp_cleanup.add_missing_annotations=true
+sp_cleanup.add_missing_deprecated_annotations=true
+sp_cleanup.add_missing_methods=false
+sp_cleanup.add_missing_nls_tags=false
+sp_cleanup.add_missing_override_annotations=true
+sp_cleanup.add_missing_override_annotations_interface_methods=true
+sp_cleanup.add_serial_version_id=false
+sp_cleanup.always_use_blocks=true
+sp_cleanup.always_use_parentheses_in_expressions=false
+sp_cleanup.always_use_this_for_non_static_field_access=true
+sp_cleanup.always_use_this_for_non_static_method_access=true
+sp_cleanup.convert_functional_interfaces=false
+sp_cleanup.convert_to_enhanced_for_loop=true
+sp_cleanup.correct_indentation=true
+sp_cleanup.format_source_code=true
+sp_cleanup.format_source_code_changes_only=false
+sp_cleanup.insert_inferred_type_arguments=false
+sp_cleanup.make_local_variable_final=true
+sp_cleanup.make_parameters_final=true
+sp_cleanup.make_private_fields_final=true
+sp_cleanup.make_type_abstract_if_missing_method=false
+sp_cleanup.make_variable_declarations_final=true
+sp_cleanup.never_use_blocks=false
+sp_cleanup.never_use_parentheses_in_expressions=true
+sp_cleanup.on_save_use_additional_actions=true
+sp_cleanup.organize_imports=true
+sp_cleanup.qualify_static_field_accesses_with_declaring_class=false
+sp_cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_with_declaring_class=true
+sp_cleanup.qualify_static_method_accesses_with_declaring_class=false
+sp_cleanup.remove_private_constructors=true
+sp_cleanup.remove_redundant_modifiers=false
+sp_cleanup.remove_redundant_semicolons=true
+sp_cleanup.remove_redundant_type_arguments=true
+sp_cleanup.remove_trailing_whitespaces=true
+sp_cleanup.remove_trailing_whitespaces_all=true
+sp_cleanup.remove_trailing_whitespaces_ignore_empty=false
+sp_cleanup.remove_unnecessary_casts=true
+sp_cleanup.remove_unnecessary_nls_tags=true
+sp_cleanup.remove_unused_imports=true
+sp_cleanup.remove_unused_local_variables=false
+sp_cleanup.remove_unused_private_fields=true
+sp_cleanup.remove_unused_private_members=false
+sp_cleanup.remove_unused_private_methods=true
+sp_cleanup.remove_unused_private_types=true
+sp_cleanup.sort_members=false
+sp_cleanup.sort_members_all=false
+sp_cleanup.use_anonymous_class_creation=false
+sp_cleanup.use_blocks=true
+sp_cleanup.use_blocks_only_for_return_and_throw=false
+sp_cleanup.use_lambda=true
+sp_cleanup.use_parentheses_in_expressions=true
+sp_cleanup.use_this_for_non_static_field_access=true
+sp_cleanup.use_this_for_non_static_field_access_only_if_necessary=false
+sp_cleanup.use_this_for_non_static_method_access=true
+sp_cleanup.use_this_for_non_static_method_access_only_if_necessary=false
diff --git a/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs b/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..87860c815222845c1d264d7d0ce498d3397f8280
--- /dev/null
+++ b/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
@@ -0,0 +1,4 @@
+configFilePath=../config/checkstyle.xml
+customModulesJarPaths=
+eclipse.preferences.version=1
+enabled=true
diff --git a/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs b/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..efbcb8c9e5d449194a48ca1ea42b7d807b573db9
--- /dev/null
+++ b/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
@@ -0,0 +1,4 @@
+customRulesJars=
+eclipse.preferences.version=1
+enabled=true
+ruleSetFilePath=../config/pmd.xml
diff --git a/workload-generator-commons/build.gradle b/workload-generator-commons/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..eef987cd444c3b6c3d8a532c8d192e94311176db
--- /dev/null
+++ b/workload-generator-commons/build.gradle
@@ -0,0 +1,3 @@
+dependencies {
+    implementation 'org.apache.curator:curator-recipes:4.3.0'
+}
\ No newline at end of file
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java
new file mode 100644
index 0000000000000000000000000000000000000000..33818b51084ce33a564d6f30cefb26b481d0a859
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java
@@ -0,0 +1,123 @@
+package theodolite.commons.workloadgeneration.communication.kafka;
+
+import java.util.Properties;
+import java.util.function.Function;
+import org.apache.avro.specific.SpecificRecord;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import theodolite.commons.workloadgeneration.functions.Transport;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
+
+/**
+ * Sends monitoring records to Kafka.
+ *
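+ * <p>A minimal usage sketch; the addresses, topic name, and record type below are placeholders
+ * mirroring the generator setup elsewhere in this change:</p>
+ *
+ * <pre>{@code
+ * final KafkaRecordSender<ActivePowerRecord> sender =
+ *     new KafkaRecordSender.Builder<ActivePowerRecord>(
+ *         "localhost:9092", "input", "http://localhost:8081")
+ *             .keyAccessor(r -> r.getIdentifier())
+ *             .timestampAccessor(r -> r.getTimestamp())
+ *             .build();
+ * sender.write(record); // record is an instance of ActivePowerRecord
+ * }</pre>
+ *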
+ * @param <T> the type of the {@link SpecificRecord} records to send
+ */
+public class KafkaRecordSender<T extends SpecificRecord> implements Transport<T> {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(KafkaRecordSender.class);
+
+  private final String topic;
+
+  private final Function<T, String> keyAccessor;
+
+  private final Function<T, Long> timestampAccessor;
+
+  private final Producer<String, T> producer;
+
+  /**
+   * Create a new {@link KafkaRecordSender}.
+   */
+  private KafkaRecordSender(final Builder<T> builder) {
+    this.topic = builder.topic;
+    this.keyAccessor = builder.keyAccessor;
+    this.timestampAccessor = builder.timestampAccessor;
+
+    final Properties properties = new Properties();
+    properties.putAll(builder.defaultProperties);
+    properties.put("bootstrap.servers", builder.bootstrapServers);
+    // properties.put("acks", this.acknowledges);
+    // properties.put("batch.size", this.batchSize);
+    // properties.put("linger.ms", this.lingerMs);
+    // properties.put("buffer.memory", this.bufferMemory);
+
+    final SchemaRegistryAvroSerdeFactory avroSerdeFactory =
+        new SchemaRegistryAvroSerdeFactory(builder.schemaRegistryUrl);
+    this.producer = new KafkaProducer<>(properties, new StringSerializer(),
+        avroSerdeFactory.<T>forKeys().serializer());
+  }
+
+  /**
+   * Builder class to build a new {@link KafkaRecordSender}.
+   *
+   * @param <T> the type of the records that will later be sent.
+   */
+  public static class Builder<T extends SpecificRecord> {
+
+    private final String bootstrapServers;
+    private final String topic;
+    private final String schemaRegistryUrl;
+    private Function<T, String> keyAccessor = x -> ""; // NOPMD
+    private Function<T, Long> timestampAccessor = x -> null; // NOPMD
+    private Properties defaultProperties = new Properties(); // NOPMD
+
+    /**
+     * Creates a Builder object for a {@link KafkaRecordSender}.
+     *
+     * @param bootstrapServers the bootstrap servers for accessing Kafka.
+     * @param topic the Kafka topic to write to.
+     * @param schemaRegistryUrl the URL of the schema registry for Avro.
+     */
+    public Builder(final String bootstrapServers, final String topic,
+        final String schemaRegistryUrl) {
+      this.bootstrapServers = bootstrapServers;
+      this.topic = topic;
+      this.schemaRegistryUrl = schemaRegistryUrl;
+    }
+
+    public Builder<T> keyAccessor(final Function<T, String> keyAccessor) {
+      this.keyAccessor = keyAccessor;
+      return this;
+    }
+
+    public Builder<T> timestampAccessor(final Function<T, Long> timestampAccessor) {
+      this.timestampAccessor = timestampAccessor;
+      return this;
+    }
+
+    public Builder<T> defaultProperties(final Properties defaultProperties) {
+      this.defaultProperties = defaultProperties;
+      return this;
+    }
+
+    public KafkaRecordSender<T> build() {
+      return new KafkaRecordSender<>(this);
+    }
+  }
+
+  /**
+   * Write the passed monitoring record to Kafka.
+   */
+  public void write(final T monitoringRecord) {
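+    // The partition is left null so Kafka assigns one based on the record's key; the
+    // timestamp is obtained from the record via the configured timestamp accessor.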
+    final ProducerRecord<String, T> record =
+        new ProducerRecord<>(this.topic, null, this.timestampAccessor.apply(monitoringRecord),
+            this.keyAccessor.apply(monitoringRecord), monitoringRecord);
+
+    LOGGER.debug("Send record to Kafka topic {}: {}", this.topic, record);
+    this.producer.send(record);
+  }
+
+  public void terminate() {
+    this.producer.close();
+  }
+
+  @Override
+  public void transport(final T message) {
+    this.write(message);
+  }
+
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java
new file mode 100644
index 0000000000000000000000000000000000000000..2249abcbcb1071cf880b2ee80f5d41f2b3dab463
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java
@@ -0,0 +1,202 @@
+package theodolite.commons.workloadgeneration.communication.zookeeper;
+
+import java.nio.charset.StandardCharsets;
+import java.util.function.BiConsumer;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.framework.api.CuratorWatcher;
+import org.apache.curator.framework.recipes.atomic.AtomicValue;
+import org.apache.curator.framework.recipes.atomic.DistributedAtomicInteger;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher.Event.EventType;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.functions.BeforeAction;
+import theodolite.commons.workloadgeneration.misc.WorkloadDefinition;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
+
+/**
+ * The central class responsible for distributing the workload through all workload generators.
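+ *
+ * <p>Coordination happens via ZooKeeper: every instance increments a distributed counter to
+ * obtain a unique worker ID. The instance with ID 0 acts as the master, runs the before action,
+ * and publishes the {@link WorkloadDefinition} as an ephemeral znode. The other instances watch
+ * for the creation of this znode and start generating their share of the workload once the
+ * definition is available.</p>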
+ */
+public class WorkloadDistributor {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(WorkloadDistributor.class);
+
+  private static final String NAMESPACE = "workload-generation";
+  private static final String COUNTER_PATH = "/counter";
+  private static final String WORKLOAD_PATH = "/workload";
+  private static final String WORKLOAD_DEFINITION_PATH = "/workload/definition";
+
+  // Curator retry strategy
+  private static final int BASE_SLEEP_TIME_MS = 2000;
+  private static final int MAX_RETRIES = 5;
+
+  // Wait time
+  private static final int MAX_WAIT_TIME = 20_000;
+
+  private final DistributedAtomicInteger counter;
+  private final KeySpace keySpace;
+  private final BeforeAction beforeAction;
+  private final BiConsumer<WorkloadDefinition, Integer> workerAction;
+
+  private final int instances;
+  private final ZooKeeper zooKeeper; // NOPMD keep instance variable instead of local variable
+  private final CuratorFramework client;
+
+  private boolean workloadGenerationStarted = false; // NOPMD explicitly initialized to false
+
+  /**
+   * Create a new workload distributor.
+   *
+   * @param instances the number of workload-generator instances.
+   * @param zooKeeper the ZooKeeper connection information.
+   * @param keySpace the key space for the workload generation.
+   * @param beforeAction the before action for the workload generation.
+   * @param workerAction the action to perform by the workers.
+   */
+  public WorkloadDistributor(
+      final int instances,
+      final ZooKeeper zooKeeper,
+      final KeySpace keySpace,
+      final BeforeAction beforeAction,
+      final BiConsumer<WorkloadDefinition, Integer> workerAction) {
+    this.instances = instances;
+    this.zooKeeper = zooKeeper;
+    this.keySpace = keySpace;
+    this.beforeAction = beforeAction;
+    this.workerAction = workerAction;
+
+    this.client = CuratorFrameworkFactory.builder()
+        .namespace(NAMESPACE)
+        .connectString(this.zooKeeper.getHost() + ":" + this.zooKeeper.getPort())
+        .retryPolicy(new ExponentialBackoffRetry(BASE_SLEEP_TIME_MS, MAX_RETRIES))
+        .build();
+
+    this.client.start();
+
+    try {
+      this.client.blockUntilConnected();
+    } catch (final InterruptedException e) {
+      LOGGER.error(e.getMessage(), e);
+      throw new IllegalStateException(e);
+    }
+
+    this.counter =
+        new DistributedAtomicInteger(this.client, COUNTER_PATH,
+            new ExponentialBackoffRetry(BASE_SLEEP_TIME_MS, MAX_RETRIES));
+  }
+
+  /**
+   * Start the workload distribution.
+   */
+  public void start() {
+    try {
+      AtomicValue<Integer> result = this.counter.increment();
+      while (!result.succeeded()) {
+        result = this.counter.increment();
+      }
+
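+      // The counter's pre-increment value serves as this instance's unique worker ID; the
+      // instance that obtains ID 0 becomes the master.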
+      final int workerId = result.preValue();
+
+      final CuratorWatcher watcher = this.buildWatcher(workerId);
+
+      final Stat nodeExists =
+          this.client.checkExists().creatingParentsIfNeeded().forPath(WORKLOAD_PATH);
+      if (nodeExists == null) {
+        this.client.create().forPath(WORKLOAD_PATH);
+      }
+
+      if (workerId == 0) {
+        LOGGER.info("This instance is master with id {}", workerId);
+
+        this.beforeAction.run();
+
+        // register worker action, as master acts also as worker
+        this.client.getChildren().usingWatcher(watcher).forPath(WORKLOAD_PATH);
+
+        LOGGER.info("Number of Workers: {}", this.instances);
+
+        final WorkloadDefinition definition =
+            new WorkloadDefinition(this.keySpace, this.instances);
+
+        this.client.create().withMode(CreateMode.EPHEMERAL).forPath(WORKLOAD_DEFINITION_PATH,
+            definition.toString().getBytes(StandardCharsets.UTF_8));
+
+      } else {
+        LOGGER.info("This instance is worker with id {}", workerId);
+
+        this.client.getChildren().usingWatcher(watcher).forPath(WORKLOAD_PATH);
+
+        final Stat definitionExists =
+            this.client.checkExists().creatingParentsIfNeeded().forPath(WORKLOAD_DEFINITION_PATH);
+
+        if (definitionExists != null) {
+          this.startWorkloadGeneration(workerId);
+        }
+      }
+
+      Thread.sleep(MAX_WAIT_TIME);
+
+      if (!this.workloadGenerationStarted) {
+        LOGGER.warn("No workload definition retrieved for 20 s. Terminating now..");
+      }
+    } catch (final Exception e) { // NOPMD need to catch exception because of external framework
+      LOGGER.error(e.getMessage(), e);
+      throw new IllegalStateException("Error when starting the distribution of the workload.", e);
+    }
+  }
+
+  /**
+   * Start the workload generation. The body of this method is only executed once.
+   *
+   * @param workerId the ID of this worker
+   * @throws Exception when an error occurs
+   */
+  // NOPMD because exception thrown from used framework
+  private synchronized void startWorkloadGeneration(final int workerId) throws Exception { // NOPMD
+
+    if (!this.workloadGenerationStarted) {
+      this.workloadGenerationStarted = true;
+
+      final byte[] bytes =
+          this.client.getData().forPath(WORKLOAD_DEFINITION_PATH);
+      final WorkloadDefinition definition =
+          WorkloadDefinition.fromString(new String(bytes, StandardCharsets.UTF_8));
+
+      this.workerAction.accept(definition, workerId);
+    }
+  }
+
+  /**
+   * Build a curator watcher which performs the worker action.
+   *
+   * @param workerId the ID of the worker to create the watcher for.
+   * @return the curator watcher.
+   */
+  private CuratorWatcher buildWatcher(final int workerId) {
+    return new CuratorWatcher() {
+
+      @Override
+      public void process(final WatchedEvent event) {
+        if (event.getType() == EventType.NodeChildrenChanged) {
+          try {
+            WorkloadDistributor.this.startWorkloadGeneration(workerId);
+          } catch (final Exception e) { // NOPMD external framework throws exception
+            LOGGER.error(e.getMessage(), e);
+            throw new IllegalStateException("Error starting workload generation.", e);
+          }
+        }
+      }
+    };
+  }
+
+  /**
+   * Stop the workload distributor.
+   */
+  public void stop() {
+    this.client.close();
+  }
+
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java
new file mode 100644
index 0000000000000000000000000000000000000000..2eaa1d487f67ae8325a3622a7ae6c4529fbb1cd6
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java
@@ -0,0 +1,56 @@
+package theodolite.commons.workloadgeneration.dimensions;
+
+import theodolite.commons.workloadgeneration.generators.AbstractWorkloadGenerator;
+
+/**
+ * Wrapper class for the definition of the keys that should be used by the
+ * {@link AbstractWorkloadGenerator}.
+ */
+public class KeySpace {
+
+  private final String prefix;
+  private final int min;
+  private final int max;
+
+  /**
+   * Create a new key space. All keys will have the prefix {@code prefix}. The remaining part of
+   * each key will be determined by a number in the interval [{@code min}, {@code max}].
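+   *
+   * <p>For example, {@code new KeySpace("s_", 0, 9)} describes the ten keys {@code s_0} to
+   * {@code s_9}.</p>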
+   *
+   * @param prefix the prefix to use for all keys
+   * @param min the lower bound (inclusive) to start counting from
+   * @param max the upper bound (inclusive) to count to
+   */
+  public KeySpace(final String prefix, final int min, final int max) {
+    if (prefix == null || prefix.contains(";")) {
+      throw new IllegalArgumentException(
+          "The prefix must not be null and must not contain the ';' character.");
+    }
+    this.prefix = prefix;
+    this.min = min;
+    this.max = max;
+  }
+
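+  /**
+   * Create a new key space with keys {@code <prefix>0} to {@code <prefix>(numberOfKeys - 1)}.
+   *
+   * @param prefix the prefix to use for all keys
+   * @param numberOfKeys the number of keys in the key space
+   */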
+  public KeySpace(final String prefix, final int numberOfKeys) {
+    this(prefix, 0, numberOfKeys - 1);
+  }
+
+  public KeySpace(final int numberOfKeys) {
+    this("sensor_", 0, numberOfKeys - 1);
+  }
+
+  public String getPrefix() {
+    return this.prefix;
+  }
+
+  public int getMin() {
+    return this.min;
+  }
+
+  public int getMax() {
+    return this.max;
+  }
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java
new file mode 100644
index 0000000000000000000000000000000000000000..7914a4985b6df40f7146c1fd681d1fba063f8b98
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java
@@ -0,0 +1,11 @@
+package theodolite.commons.workloadgeneration.functions;
+
+/**
+ * Describes the before action which is executed before every sub experiment.
+ */
+@FunctionalInterface
+public interface BeforeAction {
+
+  public void run();
+
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..672b579ebbdf3cbb08f3d05d9511c9077f9dac6b
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java
@@ -0,0 +1,14 @@
+package theodolite.commons.workloadgeneration.functions;
+
+/**
+ * This interface describes a function that takes meta information in the form of a string (e.g.
+ * an ID) and produces an object of type {@code T}.
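+ *
+ * <p>For example: {@code final MessageGenerator<String> gen = key -> "Hello " + key;}</p>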
+ *
+ * @param <T> the type of the objects that will be generated by the function.
+ */
+@FunctionalInterface
+public interface MessageGenerator<T> {
+
+  T generateMessage(final String key);
+
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java
new file mode 100644
index 0000000000000000000000000000000000000000..7e5100a4e99f13a98156311a9d892c9626b2318a
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java
@@ -0,0 +1,14 @@
+package theodolite.commons.workloadgeneration.functions;
+
+/**
+ * This interface describes a function that consumes a message of type {@code T}. This function is
+ * intended to be used to transport individual messages to the messaging system.
+ *
+ * @param <T> the type of records to send as messages.
+ */
+@FunctionalInterface
+public interface Transport<T> {
+
+  void transport(final T message);
+
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..104f1cefb34200a2cf34d1578faecdfdae6ccd56
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java
@@ -0,0 +1,138 @@
+package theodolite.commons.workloadgeneration.generators;
+
+import java.time.Duration;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import theodolite.commons.workloadgeneration.communication.zookeeper.WorkloadDistributor;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.functions.BeforeAction;
+import theodolite.commons.workloadgeneration.functions.MessageGenerator;
+import theodolite.commons.workloadgeneration.functions.Transport;
+import theodolite.commons.workloadgeneration.misc.WorkloadDefinition;
+import theodolite.commons.workloadgeneration.misc.WorkloadEntity;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
+
+/**
+ * Base for workload generators.
+ *
+ * @param <T> the type of records the workload generator is dedicated to.
+ */
+public abstract class AbstractWorkloadGenerator<T> implements WorkloadGenerator {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(AbstractWorkloadGenerator.class);
+
+  private final int instances; // NOPMD keep instance variable instead of local variable
+  private final ZooKeeper zooKeeper; // NOPMD keep instance variable instead of local variable
+  private final KeySpace keySpace; // NOPMD keep instance variable instead of local variable
+  private final BeforeAction beforeAction; // NOPMD keep instance variable instead of local variable
+  private final BiFunction<WorkloadDefinition, Integer, List<WorkloadEntity<T>>> workloadSelector;
+  private final MessageGenerator<T> generatorFunction;
+  private final Transport<T> transport;
+  private final WorkloadDistributor workloadDistributor; // NOPMD keep instance variable
+  private final ScheduledExecutorService executor;
+
+  /**
+   * Create a new workload generator.
+   *
+   * @param instances the number of workload-generator instances.
+   * @param zooKeeper the zookeeper connection.
+   * @param keySpace the keyspace.
+   * @param threads the number of threads used to generate the load.
+   * @param period the period with which each record is emitted.
+   * @param duration the maximum runtime.
+   * @param beforeAction the action to perform before the workload generation starts.
+   * @param generatorFunction the function that is used to generate the individual records.
+   * @param transport the function that is used to send generated messages to the messaging system.
+   */
+  public AbstractWorkloadGenerator(
+      final int instances,
+      final ZooKeeper zooKeeper,
+      final KeySpace keySpace,
+      final int threads,
+      final Duration period,
+      final Duration duration,
+      final BeforeAction beforeAction,
+      final MessageGenerator<T> generatorFunction,
+      final Transport<T> transport) {
+    this.instances = instances;
+    this.zooKeeper = zooKeeper;
+    this.keySpace = keySpace;
+    this.beforeAction = beforeAction;
+    this.generatorFunction = generatorFunction;
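+    // Assign keys to workers round-robin: the worker with ID w generates load for the keys
+    // min + w, min + w + n, min + w + 2n, ... up to max, where n is the number of workers.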
+    this.workloadSelector = (workloadDefinition, workerId) -> {
+      final List<WorkloadEntity<T>> workloadEntities = new LinkedList<>();
+
+      for (int i = workloadDefinition.getKeySpace().getMin() + workerId;
+          i <= workloadDefinition.getKeySpace().getMax();
+          i += workloadDefinition.getNumberOfWorkers()) {
+        final String id = workloadDefinition.getKeySpace().getPrefix() + i;
+        workloadEntities.add(new WorkloadEntity<>(id, this.generatorFunction));
+      }
+
+      return workloadEntities;
+    };
+    this.transport = transport;
+
+    this.executor = Executors.newScheduledThreadPool(threads);
+    final Random random = new Random();
+
+    final int periodMs = (int) period.toMillis();
+
+    LOGGER.info("Period: {}", periodMs);
+
+    final BiConsumer<WorkloadDefinition, Integer> workerAction = (declaration, workerId) -> {
+
+      final List<WorkloadEntity<T>> entities = this.workloadSelector.apply(declaration, workerId);
+
+      LOGGER.info("Beginning of Experiment...");
+      LOGGER.info("Generating records for {} keys.", entities.size());
+      LOGGER.info("Experiment is going to be executed for the specified duration...");
+
+      entities.forEach(entity -> {
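+        // Spread the periodic tasks over one period via random initial delays to avoid
+        // emitting all records in bursts.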
+        final long initialDelay = random.nextInt(periodMs);
+        final Runnable task = () -> this.transport.transport(entity.generateMessage());
+        this.executor.scheduleAtFixedRate(task, initialDelay, periodMs, TimeUnit.MILLISECONDS);
+      });
+
+      try {
+        this.executor.awaitTermination(duration.getSeconds(), TimeUnit.SECONDS);
+        LOGGER.info("Terminating now...");
+        this.stop();
+      } catch (final InterruptedException e) {
+        LOGGER.error("", e);
+        throw new IllegalStateException("Error when terminating the workload generation.", e);
+      }
+    };
+
+    this.workloadDistributor = new WorkloadDistributor(
+        this.instances,
+        this.zooKeeper,
+        this.keySpace,
+        this.beforeAction,
+        workerAction);
+  }
+
+  /**
+   * Start the workload generation. The generation terminates automatically after the specified
+   * {@code duration}.
+   */
+  @Override
+  public void start() {
+    this.workloadDistributor.start();
+  }
+
+  @Override
+  public void stop() {
+    this.workloadDistributor.stop();
+  }
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..944cec6a2dffed886f06fad1e36c9d35375fe15c
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java
@@ -0,0 +1,59 @@
+package theodolite.commons.workloadgeneration.generators;
+
+import java.time.Duration;
+import org.apache.avro.specific.SpecificRecord;
+import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.functions.BeforeAction;
+import theodolite.commons.workloadgeneration.functions.MessageGenerator;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
+
+/**
+ * Workload generator for generating load for the Kafka messaging system.
+ *
+ * @param <T> the type of records the workload generator is dedicated to.
+ */
+public class KafkaWorkloadGenerator<T extends SpecificRecord>
+    extends AbstractWorkloadGenerator<T> {
+
+  private final KafkaRecordSender<T> recordSender;
+
+  /**
+   * Create a new workload generator.
+   *
+   * @param instances the number of workload-generator instances.
+   * @param zooKeeper a reference to the ZooKeeper instance.
+   * @param keySpace the key space to generate the workload for.
+   * @param threads the number of threads to use per instance.
+   * @param period the period with which a message is generated for each key specified in the
+   *        {@code keySpace}.
+   * @param duration the duration for which the workload generator will emit messages.
+   * @param beforeAction the action which will be performed before the workload generator starts
+   *        generating messages. If {@code null}, no before action will be performed.
+   * @param generatorFunction the generator function. This function is executed each time a
+   *        message is generated.
+   * @param recordSender the record sender which is used to send the generated messages to Kafka.
+   */
+  public KafkaWorkloadGenerator(
+      final int instances,
+      final ZooKeeper zooKeeper,
+      final KeySpace keySpace,
+      final int threads,
+      final Duration period,
+      final Duration duration,
+      final BeforeAction beforeAction,
+      final MessageGenerator<T> generatorFunction,
+      final KafkaRecordSender<T> recordSender) {
+    super(instances, zooKeeper, keySpace, threads, period, duration, beforeAction,
+        generatorFunction,
+        recordSender);
+    this.recordSender = recordSender;
+  }
+
+  @Override
+  public void stop() {
+    this.recordSender.terminate();
+
+    super.stop();
+  }
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..785087c13480b7149a5726dfce8bbf4307b57933
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java
@@ -0,0 +1,185 @@
+package theodolite.commons.workloadgeneration.generators;
+
+import java.time.Duration;
+import java.util.Objects;
+import org.apache.avro.specific.SpecificRecord;
+import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.functions.BeforeAction;
+import theodolite.commons.workloadgeneration.functions.MessageGenerator;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
+
+/**
+ * Builder for {@link KafkaWorkloadGenerator}s.
+ *
+ * @param <T> the record type the builder is dedicated to.
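+ *
+ * <p>Typical usage, mirroring the generator setup elsewhere in this change (the concrete values
+ * and the {@code sender} instance are placeholders):</p>
+ *
+ * <pre>{@code
+ * final KafkaWorkloadGenerator<ActivePowerRecord> generator =
+ *     KafkaWorkloadGeneratorBuilder.<ActivePowerRecord>builder()
+ *         .instances(1)
+ *         .keySpace(new KeySpace("s_", 10))
+ *         .threads(4)
+ *         .period(Duration.ofMillis(1000))
+ *         .duration(Duration.ofDays(1))
+ *         .generatorFunction(key -> new ActivePowerRecord(key, System.currentTimeMillis(), 100.0))
+ *         .zooKeeper(new ZooKeeper("localhost", 2181))
+ *         .kafkaRecordSender(sender)
+ *         .build();
+ * generator.start();
+ * }</pre>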
+ */
+public final class KafkaWorkloadGeneratorBuilder<T extends SpecificRecord> { // NOPMD
+
+  private int instances; // NOPMD
+  private ZooKeeper zooKeeper; // NOPMD
+  private KeySpace keySpace; // NOPMD
+  private int threads; // NOPMD
+  private Duration period; // NOPMD
+  private Duration duration; // NOPMD
+  private BeforeAction beforeAction; // NOPMD
+  private MessageGenerator<T> generatorFunction; // NOPMD
+  private KafkaRecordSender<T> kafkaRecordSender; // NOPMD
+
+  private KafkaWorkloadGeneratorBuilder() {
+
+  }
+
+  /**
+   * Get a builder for the {@link KafkaWorkloadGenerator}.
+   *
+   * @return the builder.
+   */
+  public static <T extends SpecificRecord> KafkaWorkloadGeneratorBuilder<T> builder() {
+    return new KafkaWorkloadGeneratorBuilder<>();
+  }
+
+  /**
+   * Set the number of instances.
+   *
+   * @param instances the number of instances.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> instances(final int instances) {
+    this.instances = instances;
+    return this;
+  }
+
+  /**
+   * Set the ZooKeeper reference.
+   *
+   * @param zooKeeper a reference to the ZooKeeper instance.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> zooKeeper(final ZooKeeper zooKeeper) {
+    this.zooKeeper = zooKeeper;
+    return this;
+  }
+
+  /**
+   * Set the before action for the {@link KafkaWorkloadGenerator}.
+   *
+   * @param beforeAction the {@link BeforeAction}.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> beforeAction(final BeforeAction beforeAction) {
+    this.beforeAction = beforeAction;
+    return this;
+  }
+
+  /**
+   * Set the key space for the {@link KafkaWorkloadGenerator}.
+   *
+   * @param keySpace the {@link KeySpace}.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> keySpace(final KeySpace keySpace) {
+    this.keySpace = keySpace;
+    return this;
+  }
+
+  /**
+   * Set the number of threads for the {@link KafkaWorkloadGenerator}.
+   *
+   * @param threads the number of threads.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> threads(final int threads) {
+    this.threads = threads;
+    return this;
+  }
+
+  /**
+   * Set the period for the {@link KafkaWorkloadGenerator}.
+   *
+   * @param period the period as a {@link Duration}.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> period(final Duration period) {
+    this.period = period;
+    return this;
+  }
+
+  /**
+   * Set the duration for the {@link KafkaWorkloadGenerator}.
+   *
+   * @param duration the {@link Duration}.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> duration(final Duration duration) {
+    this.duration = duration;
+    return this;
+  }
+
+  /**
+   * Set the generator function for the {@link KafkaWorkloadGenerator}.
+   *
+   * @param generatorFunction the generator function.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> generatorFunction(
+      final MessageGenerator<T> generatorFunction) {
+    this.generatorFunction = generatorFunction;
+    return this;
+  }
+
+  /**
+   * Set the {@link KafkaRecordSender} for the {@link KafkaWorkloadGenerator}.
+   *
+   * @param kafkaRecordSender the record sender to use.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> kafkaRecordSender(
+      final KafkaRecordSender<T> kafkaRecordSender) {
+    this.kafkaRecordSender = kafkaRecordSender;
+    return this;
+  }
+
+  /**
+   * Build the actual {@link KafkaWorkloadGenerator}. The following parameters must be specified
+   * before this method is called:
+   * <ul>
+   * <li>number of instances</li>
+   * <li>zookeeper</li>
+   * <li>key space</li>
+   * <li>period</li>
+   * <li>duration</li>
+   * <li>generator function</li>
+   * <li>kafka record sender</li>
+   * </ul>
+   *
+   * @return the built instance of the {@link KafkaWorkloadGenerator}.
+   */
+  public KafkaWorkloadGenerator<T> build() {
+    if (this.instances < 1) { // NOPMD
+      throw new IllegalArgumentException(
+          "Please specify a valid number of instances. Currently: " + this.instances);
+    }
+    Objects.requireNonNull(this.zooKeeper, "Please specify the ZooKeeper instance.");
+    if (this.threads < 1) { // NOPMD
+      this.threads = 1;
+    }
+    Objects.requireNonNull(this.keySpace, "Please specify the key space.");
+    Objects.requireNonNull(this.period, "Please specify the period.");
+    Objects.requireNonNull(this.duration, "Please specify the duration.");
+    this.beforeAction = Objects.requireNonNullElse(this.beforeAction, () -> {
+    });
+    Objects.requireNonNull(this.generatorFunction, "Please specify the generator function.");
+    Objects.requireNonNull(this.kafkaRecordSender, "Please specify the kafka record sender.");
+
+    return new KafkaWorkloadGenerator<>(
+        this.instances,
+        this.zooKeeper,
+        this.keySpace,
+        this.threads,
+        this.period,
+        this.duration,
+        this.beforeAction,
+        this.generatorFunction,
+        this.kafkaRecordSender);
+  }
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..b121ac157b84d64818d9fdfc90589d49fd933752
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java
@@ -0,0 +1,18 @@
+package theodolite.commons.workloadgeneration.generators;
+
+/**
+ * Base methods for workload generators.
+ */
+public interface WorkloadGenerator {
+
+  /**
+   * Start the workload generation.
+   */
+  void start();
+
+  /**
+   * Stop the workload generation.
+   */
+  void stop();
+
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java
new file mode 100644
index 0000000000000000000000000000000000000000..86369d6c883954b792b2ee0fd6a988377ecb8965
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java
@@ -0,0 +1,71 @@
+package theodolite.commons.workloadgeneration.misc;
+
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+
+/**
+ * The central class that contains all information that needs to be exchanged between the nodes for
+ * distributed workload generation.
+ */
+public class WorkloadDefinition {
+  private static final int ZERO = 0;
+  private static final int ONE = 1;
+  private static final int TWO = 2;
+  private static final int THREE = 3;
+  private static final int FOUR = 4;
+
+  private final KeySpace keySpace;
+  private final int numberOfWorkers;
+
+  /**
+   * Create a new workload definition.
+   *
+   * @param keySpace the key space to use.
+   * @param numberOfWorkers the number of workers participating in the workload generation.
+   */
+  public WorkloadDefinition(final KeySpace keySpace, final int numberOfWorkers) {
+    this.keySpace = keySpace;
+    this.numberOfWorkers = numberOfWorkers;
+  }
+
+  public KeySpace getKeySpace() {
+    return this.keySpace;
+  }
+
+  public int getNumberOfWorkers() {
+    return this.numberOfWorkers;
+  }
+
+  /**
+   * Simple method for encoding all information of the workload definition into one string.
+   *
+   * @return a string that encodes all information of the workload definition in a compact format.
+   *         The format is 'keySpace.prefix;keySpace.min;keySpace.max;numberOfWorkers'.
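+   *         For example, a key space with prefix {@code s_}, minimum 0, and maximum 9,
+   *         distributed over four workers, is encoded as {@code "s_;0;9;4"}.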
+   */
+  @Override
+  public String toString() {
+    return this.getKeySpace().getPrefix() + ";" + this.getKeySpace().getMin() + ";"
+        + this.getKeySpace().getMax() + ";" + this.getNumberOfWorkers();
+  }
+
+  /**
+   * Parse a workload definition from a previously encoded string with the format returned by
+   * {@link WorkloadDefinition#toString()}.
+   *
+   * @param workloadDefinitionString the workload definition string.
+   * @return the parsed workload definition.
+   */
+  public static WorkloadDefinition fromString(final String workloadDefinitionString) {
+    final String[] deserialized = workloadDefinitionString.split(";");
+
+    if (deserialized.length != FOUR) {
+      throw new IllegalArgumentException(
+          "Wrong workload definition string when trying to parse the workload generation.");
+    }
+
+    return new WorkloadDefinition(
+        new KeySpace(deserialized[ZERO], Integer.valueOf(deserialized[ONE]),
+            Integer.valueOf(deserialized[TWO])),
+        Integer.valueOf(deserialized[THREE]));
+  }
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java
new file mode 100644
index 0000000000000000000000000000000000000000..d8665b3fb53e7d15ed61780e3b91fbfe56f709ba
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java
@@ -0,0 +1,22 @@
+package theodolite.commons.workloadgeneration.misc;
+
+import theodolite.commons.workloadgeneration.functions.MessageGenerator;
+
+/**
+ * Representation of an entity of the workload generation that generates load for one fixed key.
+ *
+ * @param <T> the type of records the workload generator is dedicated to.
+ */
+public class WorkloadEntity<T> {
+  private final String key;
+  private final MessageGenerator<T> generator;
+
+  public WorkloadEntity(final String key, final MessageGenerator<T> generator) {
+    this.key = key;
+    this.generator = generator;
+  }
+
+  public T generateMessage() {
+    return this.generator.generateMessage(this.key);
+  }
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java
new file mode 100644
index 0000000000000000000000000000000000000000..a80490600ad9c9c22c198fc76b6d9f73bdc30584
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java
@@ -0,0 +1,29 @@
+package theodolite.commons.workloadgeneration.misc;
+
+/**
+ * Wrapper for connection information for ZooKeeper.
+ */
+public class ZooKeeper {
+
+  private final String host;
+  private final int port;
+
+  /**
+   * Create a new representation of a ZooKeeper instance.
+   *
+   * @param host the host of the ZooKeeper instance.
+   * @param port the port of the ZooKeeper instance.
+   */
+  public ZooKeeper(final String host, final int port) {
+    this.host = host;
+    this.port = port;
+  }
+
+  public String getHost() {
+    return this.host;
+  }
+
+  public int getPort() {
+    return this.port;
+  }
+}