diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index f9ac712bd1c2fb00225cd84aa1f5a5eda9fb0ded..f7e431002e7bf214f377b7458d2eba235b7b6050 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -92,12 +92,15 @@ spotbugs:
   script:
     - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
     - docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME
-    - docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest
-    - docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA
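+    # Tag images per pipeline type: regular pipelines get a branch-prefixed
+    # "latest" and a short-SHA tag; tag pipelines get the Git tag itself. The
+    # "||" form keeps a false test from failing the job.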
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest"
+    - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
+    - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$CI_COMMIT_TAG"
     - echo $DOCKERHUB_PW | docker login -u $DOCKERHUB_ID --password-stdin
     - docker push $DOCKERHUB_ORG/$IMAGE_NAME
     - docker logout
   rules:
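+    # Always build and push for Git tags, provided all Docker Hub credentials
+    # and project variables are set; other pipelines use the change-based rules below.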
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
     - changes:
       # - $JAVA_PROJECT_NAME/**/* # hope this can be simplified soon, see #51
       - application-kafkastreams-commons/**/*
@@ -113,6 +116,8 @@ deploy-uc1-kstreams-app:
     IMAGE_NAME: "theodolite-uc1-kstreams-app"
     JAVA_PROJECT_NAME: "uc1-application"
   rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
     - changes:
       - uc1-application/**/*
       - application-kafkastreams-commons/**/*
@@ -128,6 +133,8 @@ deploy-uc2-kstreams-app:
     IMAGE_NAME: "theodolite-uc2-kstreams-app"
     JAVA_PROJECT_NAME: "uc2-application"
   rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
     - changes:
       - uc2-application/**/*
       - application-kafkastreams-commons/**/*
@@ -143,6 +150,8 @@ deploy-uc3-kstreams-app:
     IMAGE_NAME: "theodolite-uc3-kstreams-app"
     JAVA_PROJECT_NAME: "uc3-application"
   rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
     - changes:
       - uc3-application/**/*
       - application-kafkastreams-commons/**/*
@@ -158,6 +167,8 @@ deploy-uc4-kstreams-app:
     IMAGE_NAME: "theodolite-uc4-kstreams-app"
     JAVA_PROJECT_NAME: "uc4-application"
   rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
     - changes:
       - uc4-application/**/*
       - application-kafkastreams-commons/**/*
@@ -173,6 +184,8 @@ deploy-uc1-workload-generator:
     IMAGE_NAME: "theodolite-uc1-workload-generator"
     JAVA_PROJECT_NAME: "uc1-workload-generator"
   rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
     - changes:
       - uc1-workload-generator/**/*
       - application-kafkastreams-commons/**/*
@@ -188,6 +201,8 @@ deploy-uc2-workload-generator:
     IMAGE_NAME: "theodolite-uc2-workload-generator"
     JAVA_PROJECT_NAME: "uc2-workload-generator"
   rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
     - changes:
       - uc2-workload-generator/**/*
       - application-kafkastreams-commons/**/*
@@ -203,6 +218,8 @@ deploy-uc3-workload-generator:
     IMAGE_NAME: "theodolite-uc3-workload-generator"
     JAVA_PROJECT_NAME: "uc3-workload-generator"
   rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
     - changes:
       - uc3-workload-generator/**/*
       - application-kafkastreams-commons/**/*
@@ -218,6 +235,8 @@ deploy-uc4-workload-generator:
     IMAGE_NAME: "theodolite-uc4-workload-generator"
     JAVA_PROJECT_NAME: "uc4-workload-generator"
   rules: # hope this can be simplified soon, see #51
+    - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
+      when: always
     - changes:
       - uc4-workload-generator/**/*
       - application-kafkastreams-commons/**/*
diff --git a/.settings/org.eclipse.jdt.ui.prefs b/.settings/org.eclipse.jdt.ui.prefs
index 98b5ca8064a352aacfe2aebd13fbd0a87735fc3e..4e04e2891754324a6e1bf55348b6a38f592bb301 100644
--- a/.settings/org.eclipse.jdt.ui.prefs
+++ b/.settings/org.eclipse.jdt.ui.prefs
@@ -101,7 +101,7 @@ sp_cleanup.qualify_static_member_accesses_with_declaring_class=true
 sp_cleanup.qualify_static_method_accesses_with_declaring_class=false
 sp_cleanup.remove_private_constructors=true
 sp_cleanup.remove_redundant_modifiers=false
-sp_cleanup.remove_redundant_semicolons=false
+sp_cleanup.remove_redundant_semicolons=true
 sp_cleanup.remove_redundant_type_arguments=true
 sp_cleanup.remove_trailing_whitespaces=true
 sp_cleanup.remove_trailing_whitespaces_all=true
diff --git a/uc4-application/src/main/java/theodolite/uc4/application/ConfigurationKeys.java b/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
similarity index 71%
rename from uc4-application/src/main/java/theodolite/uc4/application/ConfigurationKeys.java
rename to application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
index bc5fee1f2cb4367284e9db60f575f2652b1bd05b..260dbba9c1f094ac14679b6c7c4637046a687eee 100644
--- a/uc4-application/src/main/java/theodolite/uc4/application/ConfigurationKeys.java
+++ b/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/ConfigurationKeys.java
@@ -1,31 +1,44 @@
-package theodolite.uc4.application;
+package theodolite.commons.kafkastreams;
 
 /**
  * Keys to access configuration parameters.
  */
 public final class ConfigurationKeys {
-
+  // Common keys
   public static final String APPLICATION_NAME = "application.name";
 
   public static final String APPLICATION_VERSION = "application.version";
 
+  public static final String NUM_THREADS = "num.threads";
+
+  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
+
+  public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
+
   public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
 
+  public static final String SCHEMA_REGISTRY_URL = "schema.registry.url";
+
   public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
 
+  // Additional topics
   public static final String KAFKA_OUTPUT_TOPIC = "kafka.output.topic";
 
-  public static final String AGGREGATION_DURATION_DAYS = "aggregation.duration.days";
+  public static final String CONFIGURATION_KAFKA_TOPIC = "configuration.kafka.topic";
 
-  public static final String AGGREGATION_ADVANCE_DAYS = "aggregation.advance.days";
+  // UC2
+  public static final String WINDOW_SIZE_MS = "window.size.ms";
 
-  public static final String NUM_THREADS = "num.threads";
+  public static final String WINDOW_GRACE_MS = "window.grace.ms";
 
-  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
+  // UC3
+  public static final String KAFKA_WINDOW_DURATION_MINUTES = "kafka.window.duration.minutes";
 
-  public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
+  // UC4
+  public static final String AGGREGATION_DURATION_DAYS = "aggregation.duration.days";
+
+  public static final String AGGREGATION_ADVANCE_DAYS = "aggregation.advance.days";
 
-  public static final String KAFKA_WINDOW_DURATION_MINUTES = "kafka.window.duration.minutes";
 
   private ConfigurationKeys() {}
 
diff --git a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java b/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
index ae2a6dafa3d36dada927d17a1ca00d2df63db78b..8c758c24444ea9c590c364063a397f9b7bfec8f9 100644
--- a/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
+++ b/application-kafkastreams-commons/src/main/java/theodolite/commons/kafkastreams/KafkaStreamsBuilder.java
@@ -13,6 +13,8 @@ import titan.ccp.common.kafka.streams.PropertiesBuilder;
 public abstract class KafkaStreamsBuilder {
 
   // Kafkastreams application specific
+  protected String schemaRegistryUrl; // NOPMD for use in subclass
+
   private String applicationName; // NOPMD
   private String applicationVersion; // NOPMD
   private String bootstrapServers; // NOPMD
@@ -55,6 +57,17 @@ public abstract class KafkaStreamsBuilder {
     return this;
   }
 
+  /**
+   * Sets the URL for the schema registry.
+   *
+   * @param url The URL of the schema registry.
+   * @return this builder instance, to allow method chaining.
+   */
+  public KafkaStreamsBuilder schemaRegistry(final String url) {
+    this.schemaRegistryUrl = url;
+    return this;
+  }
+
   /**
    * Sets the Kafka Streams property for the number of threads (num.stream.threads). Can be minus
    * one for using the default.
@@ -131,9 +144,10 @@ public abstract class KafkaStreamsBuilder {
    */
   public KafkaStreams build() {
     // Check for required attributes for building properties.
-    Objects.requireNonNull(this.bootstrapServers, "Bootstrap server has not been set.");
     Objects.requireNonNull(this.applicationName, "Application name has not been set.");
     Objects.requireNonNull(this.applicationVersion, "Application version has not been set.");
+    Objects.requireNonNull(this.bootstrapServers, "Bootstrap server has not been set.");
+    Objects.requireNonNull(this.schemaRegistryUrl, "Schema registry has not been set.");
 
     // Create the Kafka streams instance.
     return new KafkaStreams(this.buildTopology(), this.buildProperties());
diff --git a/build.gradle b/build.gradle
index 378db78373409b7f532e70f2e5e01cf0085a9f5f..9311474c4c23d8c3400768b1f7d2d538fd5597e6 100644
--- a/build.gradle
+++ b/build.gradle
@@ -12,9 +12,10 @@ buildscript {
 
 // Variables used to distinct different subprojects
 def useCaseProjects = subprojects.findAll {it -> it.name.matches('uc(.)*')}
+def useCaseApplications = subprojects.findAll {it -> it.name.matches('uc[0-9]+-application')}
+def useCaseGenerators = subprojects.findAll {it -> it.name.matches('uc[0-9]+-workload-generator')}
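+// For example, 'uc1-application' falls into useCaseApplications and 'uc1-workload-generator' into useCaseGenerators.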
 def commonProjects = subprojects.findAll {it -> it.name.matches('(.)*commons(.)*')}
 
-
 // Plugins
 allprojects {
   apply plugin: 'eclipse'
@@ -51,17 +52,18 @@ allprojects {
 	    maven {
 	    	url "https://oss.sonatype.org/content/repositories/snapshots/"
 	    }
+      maven {
+        url 'https://packages.confluent.io/maven/'
+      }
 	}
 }
 
-// Dependencies for all use cases
-configure(useCaseProjects) {
+// Dependencies for all use case applications
+configure(useCaseApplications) {
   dependencies {
-      // These dependencies are exported to consumers, that is to say found on their compile classpath.
-      api('org.industrial-devops:titan-ccp-common:0.0.4-SNAPSHOT') { changing = true }
-
       // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
-      implementation 'org.apache.kafka:kafka-clients:2.1.0'
+      implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
+      implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
       implementation 'com.google.guava:guava:24.1-jre'
       implementation 'org.jctools:jctools-core:2.1.1'
       implementation 'org.slf4j:slf4j-simple:1.6.1'
@@ -72,15 +74,31 @@ configure(useCaseProjects) {
   }
 }
 
+// Dependencies for all use case generators
+configure(useCaseGenerators) {
+  dependencies {
+      // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
+      implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
+      implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
+      implementation 'com.google.guava:guava:24.1-jre'
+      implementation 'org.jctools:jctools-core:2.1.1'
+      implementation 'org.slf4j:slf4j-simple:1.6.1'
+
+      // Dependency on the common workload generation code (workload-generator-commons)
+      implementation project(':workload-generator-commons')
+
+      // Use JUnit test framework
+      testImplementation 'junit:junit:4.12'
+  }
+}
+
 // Dependencies for all commons
 configure(commonProjects) {
   dependencies {
-      // These dependencies is exported to consumers, that is to say found on their compile classpath.
-      api 'org.apache.kafka:kafka-clients:2.4.0'
-
       // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
       implementation 'org.slf4j:slf4j-simple:1.6.1'
-      implementation('org.industrial-devops:titan-ccp-common:0.0.4-SNAPSHOT') { changing = true }
+      implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
+      implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
 
       // Use JUnit test framework
       testImplementation 'junit:junit:4.12'
diff --git a/docker-test/uc1-docker-compose/docker-compose.yml b/docker-test/uc1-docker-compose/docker-compose.yml
index ba288cb83cf649030577e6331fee49f46316ee52..d394255951151d931b73e4c923bb10ecaed66a2c 100755
--- a/docker-test/uc1-docker-compose/docker-compose.yml
+++ b/docker-test/uc1-docker-compose/docker-compose.yml
@@ -16,11 +16,11 @@ services:
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
       KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
   uc-app:
-    image: benediktwetzel/uc1-app:latest
+    image: theodolite/theodolite-uc1-kstreams-app:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
   uc-wg: 
-    image: benediktwetzel/uc1-wg:latest
+    image: theodolite/theodolite-uc1-workload-generator:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       NUM_SENSORS: 1
diff --git a/docker-test/uc2-docker-compose/docker-compose.yml b/docker-test/uc2-docker-compose/docker-compose.yml
index 20a7a73c99c102fe90fa3d4eaa9935dba5298a94..f730148a89d41a819d81a4770e0d53a960dbe493 100755
--- a/docker-test/uc2-docker-compose/docker-compose.yml
+++ b/docker-test/uc2-docker-compose/docker-compose.yml
@@ -16,11 +16,11 @@ services:
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
       KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
   uc-app:
-    image: benediktwetzel/uc2-app:latest
+    image: theodolite/theodolite-uc2-kstreams-app:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
   uc-wg: 
-    image: benediktwetzel/uc2-wg:latest
+    image: theodolite/theodolite-uc2-workload-generator:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       NUM_SENSORS: 1
\ No newline at end of file
diff --git a/docker-test/uc3-docker-compose/docker-compose.yml b/docker-test/uc3-docker-compose/docker-compose.yml
index 2cb0d883acc38e0d24434faf4e7af82ff3c42a81..2a3cb23a79f9edda699fe1bb07c1b922614aeb13 100755
--- a/docker-test/uc3-docker-compose/docker-compose.yml
+++ b/docker-test/uc3-docker-compose/docker-compose.yml
@@ -16,12 +16,12 @@ services:
       KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
       KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
   uc-app:
-    image: benediktwetzel/uc3-app:latest
+    image: theodolite/theodolite-uc3-kstreams-app:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       KAFKA_WINDOW_DURATION_MINUTES: 60
   uc-wg: 
-    image: benediktwetzel/uc3-wg:latest
+    image: theodolite/theodolite-uc3-workload-generator:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       NUM_SENSORS: 1
\ No newline at end of file
diff --git a/docker-test/uc4-docker-compose/docker-compose.yml b/docker-test/uc4-docker-compose/docker-compose.yml
index be945cefe92fe75503187fb6b94ff6c951e1b8f2..1f015f23b2e8b98eba27ae6f387adb123ae2ccc2 100755
--- a/docker-test/uc4-docker-compose/docker-compose.yml
+++ b/docker-test/uc4-docker-compose/docker-compose.yml
@@ -26,11 +26,11 @@ services:
       SCHEMA_REGISTRY_HOST_NAME: schema-registry
       SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
   uc-app:
-    image: soerenhenning/uc4-app:latest #TODO
+    image: theodolite/theodolite-uc4-kstreams-app:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
       SCHEMA_REGISTRY_URL: http://schema-registry:8081
   uc-wg: 
-    image: soerenhenning/uc4-wg:latest #TODO
+    image: theodolite/theodolite-uc4-workload-generator:latest
     environment:
       KAFKA_BOOTSTRAP_SERVERS: kafka:9092
diff --git a/docs/release-process.md b/docs/release-process.md
new file mode 100644
index 0000000000000000000000000000000000000000..097890f5fb446f69902c0537fefe4f0f0a2c2bd5
--- /dev/null
+++ b/docs/release-process.md
@@ -0,0 +1,18 @@
+# Release Process
+
+We assume that we are creating the release `v0.1.1`. Please make sure to
+adjust the following steps according to the release you are actually
+performing.
+
+1. Create a new branch `v0.1`, if it does not already exist. This branch will
+never be merged into master again.
+
+2. Check out the `v0.1` branch.
+
+3. Update all references to Theodolite Docker images to the tag `v0-1-1`. These are
+mainly the Kubernetes resource definitions in `execution` as well as the Docker
+Compose files in `docker-test`.
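+For example, an image reference such as
+`theodolite/theodolite-uc1-kstreams-app:latest` becomes
+`theodolite/theodolite-uc1-kstreams-app:v0-1-1`.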
+
+4. Commit these changes.
+
+5. Tag this commit with `v0.1.1`. The corresponding Docker images will be built and uploaded by the CI pipeline.
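+For example: `git tag v0.1.1 && git push origin v0.1.1`.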
diff --git a/execution/README.md b/execution/README.md
index 89a851a9c8bafd29a4232f142e7b0da8b88c1132..af4caaa80db9cbf6459813f7c1c9330ad7769fda 100644
--- a/execution/README.md
+++ b/execution/README.md
@@ -142,10 +142,10 @@ Depending on your setup, some additional adjustments may be necessary:
 
 ## Execution
 
-The `./run_loop.sh` is the entrypoint for all benchmark executions. Is has to be called as follows:
+The `./theodolite.sh` script is the entry point for all benchmark executions. It has to be called as follows:
 
 ```sh
-./run_loop.sh <use-case> <wl-values> <instances> <partitions> <cpu-limit> <memory-limit> <commit-interval> <duration>
+./theodolite.sh <use-case> <wl-values> <instances> <partitions> <cpu-limit> <memory-limit> <commit-interval> <duration>
 ```
 
 * `<use-case>`: Stream processing use case to be benchmarked. Has to be one of `1`, `2`, `3` or `4`.
diff --git a/execution/run_uc1-new.sh b/execution/run_uc1.sh
similarity index 86%
rename from execution/run_uc1-new.sh
rename to execution/run_uc1.sh
index 564e03a470723f2b4564ccf96d31b66fa7dd7d2f..04eb86edc9bb5653f3281793bf48655bca643391 100755
--- a/execution/run_uc1-new.sh
+++ b/execution/run_uc1.sh
@@ -85,6 +85,33 @@ do
 done
 echo "Finish topic deletion, print topics:"
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+
+# delete zookeeper nodes used for workload generation
+echo "Delete ZooKeeper configurations used for workload generation"
+kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation"
+echo "Waiting for deletion"
+
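+# Poll ZooKeeper's root znode listing until the "workload-generation" node is
+# gone; the awk call extracts the bracketed list from the last output line of
+# "zookeeper-shell ls /".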
+while true
+do
+    IFS=', ' read -r -a array <<< $(kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 ls /" | tail -n 1 | awk -F[\]\[] '{print $2}')
+    found=0
+    for element in "${array[@]}"
+    do
+        if [ "$element" == "workload-generation" ]; then
+                found=1
+                break
+        fi
+    done
+    if [ $found -ne 1 ]; then
+        echo "ZooKeeper reset was successful."
+        break
+    else 
+        echo "ZooKeeper reset was not successful. Retrying in 5s."
+        sleep 5s
+    fi
+done
+echo "Deletion finished"
+
 echo "Exiting script"
 
 KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
diff --git a/execution/run_uc2-new.sh b/execution/run_uc2.sh
similarity index 78%
rename from execution/run_uc2-new.sh
rename to execution/run_uc2.sh
index aca65894b5d791eb20fd97b9bc9ab279f693eda7..68f592cc963847f56f316e3c214b2b4bb1d64fc6 100755
--- a/execution/run_uc2-new.sh
+++ b/execution/run_uc2.sh
@@ -26,7 +26,12 @@ kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-z
 
 # Start workload generator
 NUM_NESTED_GROUPS=$DIM_VALUE
-sed "s/{{NUM_NESTED_GROUPS}}/$NUM_NESTED_GROUPS/g" uc2-workload-generator/deployment.yaml | kubectl apply -f -
+WL_MAX_RECORDS=150000
+APPROX_NUM_SENSORS=$((4**NUM_NESTED_GROUPS))
+WL_INSTANCES=$(((APPROX_NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
+
+WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_NESTED_GROUPS}}/$NUM_NESTED_GROUPS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc2-workload-generator/deployment.yaml)
+echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
 
 # Start application
 REPLICAS=$INSTANCES
@@ -48,7 +53,8 @@ python lag_analysis.py $EXP_ID uc2 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
 deactivate
 
 # Stop wl and app
-kubectl delete -f uc2-workload-generator/deployment.yaml
+#sed "s/{{INSTANCES}}/1/g" uc2-workload-generator/deployment.yaml | kubectl delete -f -
+echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
 kubectl delete -f uc2-application/aggregation-service.yaml
 kubectl delete -f uc2-application/jmx-configmap.yaml
 kubectl delete -f uc2-application/service-monitor.yaml
@@ -78,6 +84,33 @@ do
 done
 echo "Finish topic deletion, print topics:"
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+
+# delete zookeeper nodes used for workload generation
+echo "Delete ZooKeeper configurations used for workload generation"
+kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation"
+echo "Waiting for deletion"
+
+while true
+do
+    IFS=', ' read -r -a array <<< $(kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 ls /" | tail -n 1 | awk -F[\]\[] '{print $2}')
+    found=0
+    for element in "${array[@]}"
+    do
+        if [ "$element" == "workload-generation" ]; then
+                found=1
+                break
+        fi
+    done
+    if [ $found -ne 1 ]; then
+        echo "ZooKeeper reset was successful."
+        break
+    else 
+        echo "ZooKeeper reset was not successful. Retrying in 5s."
+        sleep 5s
+    fi
+done
+echo "Deletion finished"
+
 echo "Exiting script"
 
 KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
diff --git a/execution/run_uc3-new.sh b/execution/run_uc3.sh
similarity index 86%
rename from execution/run_uc3-new.sh
rename to execution/run_uc3.sh
index 79500eb508e39d9460c965494a4b7d0b34b6585a..f214e20b3af93b0f89d76d6ea50ce3d7cd428ded 100755
--- a/execution/run_uc3-new.sh
+++ b/execution/run_uc3.sh
@@ -86,6 +86,33 @@ do
 done
 echo "Finish topic deletion, print topics:"
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+
+# delete zookeeper nodes used for workload generation
+echo "Delete ZooKeeper configurations used for workload generation"
+kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation"
+echo "Waiting for deletion"
+
+while true
+do
+    IFS=', ' read -r -a array <<< $(kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 ls /" | tail -n 1 | awk -F[\]\[] '{print $2}')
+    found=0
+    for element in "${array[@]}"
+    do
+        if [ "$element" == "workload-generation" ]; then
+                found=1
+                break
+        fi
+    done
+    if [ $found -ne 1 ]; then
+        echo "ZooKeeper reset was successful."
+        break
+    else 
+        echo "ZooKeeper reset was not successful. Retrying in 5s."
+        sleep 5s
+    fi
+done
+echo "Deletion finished"
+
 echo "Exiting script"
 
 KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
diff --git a/execution/run_uc4-new.sh b/execution/run_uc4.sh
similarity index 79%
rename from execution/run_uc4-new.sh
rename to execution/run_uc4.sh
index 664d866f88d894eda37a30a72875151f1d545e98..04fd130694e96285ca93b7561f1ea58ccdb30ab8 100755
--- a/execution/run_uc4-new.sh
+++ b/execution/run_uc4.sh
@@ -26,8 +26,11 @@ kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-z
 
 # Start workload generator
 NUM_SENSORS=$DIM_VALUE
-#NUM_SENSORS=xy
-sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g" uc4-workload-generator/deployment.yaml | kubectl apply -f -
+WL_MAX_RECORDS=150000
+WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
+
+WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc4-workload-generator/deployment.yaml)
+echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
 
 # Start application
 REPLICAS=$INSTANCES
@@ -51,7 +54,8 @@ python lag_analysis.py $EXP_ID uc4 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
 deactivate
 
 # Stop wl and app
-kubectl delete -f uc4-workload-generator/deployment.yaml
+#sed "s/{{INSTANCES}}/1/g" uc4-workload-generator/deployment.yaml | kubectl delete -f -
+echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
 kubectl delete -f uc4-application/aggregation-service.yaml
 kubectl delete -f uc4-application/jmx-configmap.yaml
 kubectl delete -f uc4-application/service-monitor.yaml
@@ -81,6 +85,33 @@ do
 done
 echo "Finish topic deletion, print topics:"
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+
+# delete zookeeper nodes used for workload generation
+echo "Delete ZooKeeper configurations used for workload generation"
+kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation"
+echo "Waiting for deletion"
+
+while true
+do
+    IFS=', ' read -r -a array <<< $(kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 ls /" | tail -n 1 | awk -F[\]\[] '{print $2}')
+    found=0
+    for element in "${array[@]}"
+    do
+        if [ "$element" == "workload-generation" ]; then
+                found=1
+                break
+        fi
+    done
+    if [ $found -ne 1 ]; then
+        echo "ZooKeeper reset was successful."
+        break
+    else 
+        echo "ZooKeeper reset was not successful. Retrying in 5s."
+        sleep 5s
+    fi
+done
+echo "Deletion finished"
+
 echo "Exiting script"
 
 KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
diff --git a/execution/run_loop.sh b/execution/theodolite.sh
similarity index 98%
rename from execution/run_loop.sh
rename to execution/theodolite.sh
index 9004e5b272a021467a06db532e59a2ad9e36453a..d382d9f640167cee51fc68938d69c51bfb427a03 100755
--- a/execution/run_loop.sh
+++ b/execution/theodolite.sh
@@ -22,4 +22,4 @@ print("Going to execute " + str(len(dim_values)*len(replicas)) + " subexperiment
 
 experiment_config = ExperimentConfig(uc, dim_values, replicas, partitions, cpu_limit, memory_limit, kafka_streams_commit_interval_ms, execution_minutes, default_strategy, subexperiment_executor)
 executor = ExperimentExecutor(experiment_config)
-executor.execute()
\ No newline at end of file
+executor.execute()
diff --git a/execution/uc1-application/aggregation-deployment.yaml b/execution/uc1-application/aggregation-deployment.yaml
index d5bccca4a72f6a47a855ed8a7ca47fac4a8a19ca..bcb0a955de0d5ce64fe6bdcba1e537468c833e5b 100644
--- a/execution/uc1-application/aggregation-deployment.yaml
+++ b/execution/uc1-application/aggregation-deployment.yaml
@@ -15,13 +15,15 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: uc1-application
-        image: "soerenhenning/uc1-app:latest"
+        image: "theodolite/theodolite-uc1-kstreams-app:latest"
         ports:
         - containerPort: 5555
           name: jmx
         env:
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
+        - name: SCHEMA_REGISTRY_URL
+          value: "http://my-confluent-cp-schema-registry:8081"
         - name: COMMIT_INTERVAL_MS
           value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
         - name: JAVA_OPTS
@@ -50,4 +52,4 @@ spec:
       volumes:
         - name: jmx-config
           configMap:
-            name: aggregation-jmx-configmap
\ No newline at end of file
+            name: aggregation-jmx-configmap
diff --git a/execution/uc1-workload-generator/deployment.yaml b/execution/uc1-workload-generator/deployment.yaml
index a0fde4bbf9765b2bb56bd36acde430d97169f34b..e8326926e7bdb1b49be2d1c03f4a8e26ca77a2a6 100644
--- a/execution/uc1-workload-generator/deployment.yaml
+++ b/execution/uc1-workload-generator/deployment.yaml
@@ -1,12 +1,11 @@
 apiVersion: apps/v1
-kind: StatefulSet
+kind: Deployment
 metadata:
   name: titan-ccp-load-generator
 spec:
   selector:
     matchLabels:
       app: titan-ccp-load-generator
-  serviceName: titan-ccp-load-generator
   replicas: {{INSTANCES}}
   template:
     metadata:
@@ -16,10 +15,16 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: workload-generator
-        image: soerenhenning/uc1-wg:latest 
+        image: theodolite/theodolite-uc1-workload-generator:latest
         env:
+        - name: ZK_HOST
+          value: "my-confluent-cp-zookeeper"
+        - name: ZK_PORT
+          value: "2181"
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
+        - name: SCHEMA_REGISTRY_URL
+          value: "http://my-confluent-cp-schema-registry:8081"
         - name: NUM_SENSORS
           value: "{{NUM_SENSORS}}"
         - name: POD_NAME
@@ -28,4 +33,3 @@ spec:
               fieldPath: metadata.name
         - name: INSTANCES
           value: "{{INSTANCES}}"
-          
\ No newline at end of file
diff --git a/execution/uc2-application/aggregation-deployment.yaml b/execution/uc2-application/aggregation-deployment.yaml
index ce52421731ea5fc044c435ad10adb311e7e7e878..199966a31d0ccac1f5bb8e3b1c0e17e1cae1f8c9 100644
--- a/execution/uc2-application/aggregation-deployment.yaml
+++ b/execution/uc2-application/aggregation-deployment.yaml
@@ -15,13 +15,15 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: uc2-application
-        image: "benediktwetzel/uc2-app:latest"
+        image: "theodolite/theodolite-uc2-kstreams-app:latest"
         ports:
         - containerPort: 5555
           name: jmx
         env:
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
+        - name: SCHEMA_REGISTRY_URL
+          value: "http://my-confluent-cp-schema-registry:8081"
         - name: COMMIT_INTERVAL_MS
           value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
         - name: JAVA_OPTS
@@ -50,4 +52,4 @@ spec:
       volumes:
         - name: jmx-config
           configMap:
-            name: aggregation-jmx-configmap
\ No newline at end of file
+            name: aggregation-jmx-configmap
diff --git a/execution/uc2-workload-generator/deployment.yaml b/execution/uc2-workload-generator/deployment.yaml
index 52592626f2a6bf93415c29f5bb4f020b527a5899..a7bf66f5e47a6fadfcd294366a3cfdefeaca656a 100644
--- a/execution/uc2-workload-generator/deployment.yaml
+++ b/execution/uc2-workload-generator/deployment.yaml
@@ -6,7 +6,7 @@ spec:
   selector:
     matchLabels:
       app: titan-ccp-load-generator
-  replicas: 1
+  replicas: {{INSTANCES}}
   template:
     metadata:
       labels:
@@ -15,14 +15,23 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: workload-generator
-        image: benediktwetzel/uc2-wg:latest 
+        image: theodolite/theodolite-uc2-workload-generator:latest
         env:
+        - name: ZK_HOST
+          value: "my-confluent-cp-zookeeper"
+        - name: ZK_PORT
+          value: "2181"
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
-        - name: HIERARCHY
-          value: "full"
+        - name: SCHEMA_REGISTRY_URL
+          value: "http://my-confluent-cp-schema-registry:8081"
         - name: NUM_SENSORS
           value: "4"
         - name: NUM_NESTED_GROUPS
           value: "{{NUM_NESTED_GROUPS}}"
-          
\ No newline at end of file
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: INSTANCES
+          value: "{{INSTANCES}}"
diff --git a/execution/uc3-application/aggregation-deployment.yaml b/execution/uc3-application/aggregation-deployment.yaml
index 0f3327af3119df125e3431574e3e406183abc132..a535b5b6443e89564d4bb0cbe17593c60dc289dc 100644
--- a/execution/uc3-application/aggregation-deployment.yaml
+++ b/execution/uc3-application/aggregation-deployment.yaml
@@ -15,13 +15,15 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: uc3-application
-        image: "soerenhenning/uc3-app:latest"
+        image: "theodolite/theodolite-uc3-kstreams-app:latest"
         ports:
         - containerPort: 5555
           name: jmx
         env:
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
+        - name: SCHEMA_REGISTRY_URL
+          value: "http://my-confluent-cp-schema-registry:8081"
         - name: KAFKA_WINDOW_DURATION_MINUTES
           value: "1"
         - name: COMMIT_INTERVAL_MS
@@ -52,4 +54,4 @@ spec:
       volumes:
         - name: jmx-config
           configMap:
-            name: aggregation-jmx-configmap
\ No newline at end of file
+            name: aggregation-jmx-configmap
diff --git a/execution/uc3-workload-generator/deployment.yaml b/execution/uc3-workload-generator/deployment.yaml
index 9ecd2b67e757c94221e36edcfcfd43c22782270a..d323fd089eeaa4542db5a645fb3b08885b8eff26 100644
--- a/execution/uc3-workload-generator/deployment.yaml
+++ b/execution/uc3-workload-generator/deployment.yaml
@@ -1,12 +1,11 @@
 apiVersion: apps/v1
-kind: StatefulSet
+kind: Deployment
 metadata:
   name: titan-ccp-load-generator
 spec:
   selector:
     matchLabels:
       app: titan-ccp-load-generator
-  serviceName: titan-ccp-load-generator
   replicas: {{INSTANCES}}
   template:
     metadata:
@@ -16,10 +15,16 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: workload-generator
-        image: soerenhenning/uc3-wg:latest 
+        image: theodolite/theodolite-uc3-workload-generator:latest
         env:
+        - name: ZK_HOST
+          value: "my-confluent-cp-zookeeper"
+        - name: ZK_PORT
+          value: "2181"
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
+        - name: SCHEMA_REGISTRY_URL
+          value: "http://my-confluent-cp-schema-registry:8081"
         - name: NUM_SENSORS
           value: "{{NUM_SENSORS}}"
         - name: POD_NAME
@@ -28,4 +33,3 @@ spec:
               fieldPath: metadata.name
         - name: INSTANCES
           value: "{{INSTANCES}}"
-          
\ No newline at end of file
diff --git a/execution/uc4-application/aggregation-deployment.yaml b/execution/uc4-application/aggregation-deployment.yaml
index f7a750c790b6a9eab8453fa91e05176de665104e..5f71737046e12b7f0116d59c4b55f0c0de39bbd2 100644
--- a/execution/uc4-application/aggregation-deployment.yaml
+++ b/execution/uc4-application/aggregation-deployment.yaml
@@ -15,13 +15,15 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: uc4-application
-        image: "soerenhenning/uc4-app:latest"
+        image: "theodolite/theodolite-uc4-kstreams-app:latest"
         ports:
         - containerPort: 5555
           name: jmx
         env:
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
+        - name: SCHEMA_REGISTRY_URL
+          value: "http://my-confluent-cp-schema-registry:8081"
         - name: AGGREGATION_DURATION_DAYS
           value: "3" #AGGREGATION_DURATION_DAYS
         - name: AGGREGATION_DURATION_ADVANCE
@@ -54,4 +56,4 @@ spec:
       volumes:
         - name: jmx-config
           configMap:
-            name: aggregation-jmx-configmap
\ No newline at end of file
+            name: aggregation-jmx-configmap
diff --git a/execution/uc4-workload-generator/deployment.yaml b/execution/uc4-workload-generator/deployment.yaml
index 6400abc345dcfb902364d3225bc6eb174380eb8b..98747b3922d439144e783b0e637cbe68e46f1b88 100644
--- a/execution/uc4-workload-generator/deployment.yaml
+++ b/execution/uc4-workload-generator/deployment.yaml
@@ -6,7 +6,7 @@ spec:
   selector:
     matchLabels:
       app: titan-ccp-load-generator
-  replicas: 1
+  replicas: {{INSTANCES}}
   template:
     metadata:
       labels:
@@ -15,10 +15,21 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: workload-generator
-        image: soerenhenning/uc4-wg:latest 
+        image: theodolite/theodolite-uc4-workload-generator:latest
         env:
+        - name: ZK_HOST
+          value: "my-confluent-cp-zookeeper"
+        - name: ZK_PORT
+          value: "2181"
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
+        - name: SCHEMA_REGISTRY_URL
+          value: "http://my-confluent-cp-schema-registry:8081"
         - name: NUM_SENSORS
           value: "{{NUM_SENSORS}}"
-          
\ No newline at end of file
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: INSTANCES
+          value: "{{INSTANCES}}"
diff --git a/settings.gradle b/settings.gradle
index 51112256b1a124d07ad80caf7ac0ccaf697858d3..9104525ce160a25957f9731f820a723b4f36f7d5 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -1,5 +1,6 @@
 rootProject.name = 'scalability-benchmarking'
 
+include 'workload-generator-commons'
 include 'application-kafkastreams-commons'
 
 include 'uc1-workload-generator'
diff --git a/uc1-application/src/main/java/theodolite/uc1/application/ConfigurationKeys.java b/uc1-application/src/main/java/theodolite/uc1/application/ConfigurationKeys.java
deleted file mode 100644
index ee4113c3088629fe01988721e32d9704f5d30da5..0000000000000000000000000000000000000000
--- a/uc1-application/src/main/java/theodolite/uc1/application/ConfigurationKeys.java
+++ /dev/null
@@ -1,25 +0,0 @@
-package theodolite.uc1.application;
-
-/**
- * Keys to access configuration parameters.
- */
-public final class ConfigurationKeys {
-
-  public static final String APPLICATION_NAME = "application.name";
-
-  public static final String APPLICATION_VERSION = "application.version";
-
-  public static final String NUM_THREADS = "num.threads";
-
-  public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
-
-  public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
-
-  public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
-
-  public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
-
-
-  private ConfigurationKeys() {}
-
-}
diff --git a/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java b/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
index b551fb7f8ff74f5ddc7e3aad901c1412075c6da6..a35cc37b36fb906e5c5495006126374d4de4656c 100644
--- a/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
+++ b/uc1-application/src/main/java/theodolite/uc1/application/HistoryService.java
@@ -3,8 +3,9 @@ package theodolite.uc1.application;
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
+import theodolite.commons.kafkastreams.ConfigurationKeys;
 import theodolite.uc1.streamprocessing.Uc1KafkaStreamsBuilder;
-import titan.ccp.common.configuration.Configurations;
+import titan.ccp.common.configuration.ServiceConfigurations;
 
 /**
  * A microservice that manages the history and, therefore, stores and aggregates incoming
@@ -13,7 +14,7 @@ import titan.ccp.common.configuration.Configurations;
  */
 public class HistoryService {
 
-  private final Configuration config = Configurations.create();
+  private final Configuration config = ServiceConfigurations.createWithDefaults();
 
   private final CompletableFuture<Void> stopEvent = new CompletableFuture<>();
 
@@ -40,6 +41,7 @@ public class HistoryService {
         .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
         .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
         .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
+        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
         .build();
 
     this.stopEvent.thenRun(kafkaStreams::close);
diff --git a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java b/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
index 824a8dadd4d80dd29d09b21543fa6da6aedf5365..1c30e0c2c83b3d8a2f3dca4df0c7aec99cc4f450 100644
--- a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
+++ b/uc1-application/src/main/java/theodolite/uc1/streamprocessing/TopologyBuilder.java
@@ -7,8 +7,8 @@ import org.apache.kafka.streams.Topology;
 import org.apache.kafka.streams.kstream.Consumed;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-import titan.ccp.models.records.ActivePowerRecordFactory;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
+import titan.ccp.model.records.ActivePowerRecord;
 
 /**
  * Builds Kafka Stream Topology for the History microservice.
@@ -18,14 +18,19 @@ public class TopologyBuilder {
   private static final Logger LOGGER = LoggerFactory.getLogger(TopologyBuilder.class);
 
   private final String inputTopic;
+  private final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory;
+
   private final Gson gson = new Gson();
   private final StreamsBuilder builder = new StreamsBuilder();
 
+
   /**
    * Create a new {@link TopologyBuilder} using the given topics.
    */
-  public TopologyBuilder(final String inputTopic) {
+  public TopologyBuilder(final String inputTopic,
+      final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory) {
     this.inputTopic = inputTopic;
+    this.srAvroSerdeFactory = srAvroSerdeFactory;
   }
 
   /**
@@ -35,7 +40,7 @@ public class TopologyBuilder {
     this.builder
         .stream(this.inputTopic, Consumed.with(
             Serdes.String(),
-            IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
+            this.srAvroSerdeFactory.<ActivePowerRecord>forValues()))
         .mapValues(v -> this.gson.toJson(v))
         .foreach((k, v) -> LOGGER.info("Key: " + k + " Value: " + v));
 
diff --git a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java b/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
index 4af3f130373d0596232921b9c5cc0b48df573b72..7699ecb48369a2041777b901931c46072a10d99f 100644
--- a/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
+++ b/uc1-application/src/main/java/theodolite/uc1/streamprocessing/Uc1KafkaStreamsBuilder.java
@@ -3,6 +3,7 @@ package theodolite.uc1.streamprocessing;
 import java.util.Objects;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
 
 /**
  * Builder for the Kafka Streams configuration.
@@ -18,6 +19,7 @@ public class Uc1KafkaStreamsBuilder extends KafkaStreamsBuilder {
   @Override
   protected Topology buildTopology() {
     Objects.requireNonNull(this.inputTopic, "Input topic has not been set.");
-    return new TopologyBuilder(this.inputTopic).build();
+    return new TopologyBuilder(this.inputTopic,
+        new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl)).build();
   }
 }
diff --git a/uc1-application/src/main/resources/META-INF/application.properties b/uc1-application/src/main/resources/META-INF/application.properties
index 9dcbb9a64be111c2ea1db006081b983c9007b140..3fb301516daa4c7e14875d3d9ca9df9c770eb69e 100644
--- a/uc1-application/src/main/resources/META-INF/application.properties
+++ b/uc1-application/src/main/resources/META-INF/application.properties
@@ -5,6 +5,8 @@ kafka.bootstrap.servers=localhost:9092
 kafka.input.topic=input
 kafka.output.topic=output
 
+schema.registry.url=http://localhost:8091
+
 num.threads=1
 commit.interval.ms=100
 cache.max.bytes.buffering=-1
diff --git a/uc1-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java b/uc1-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
deleted file mode 100644
index bf562d86ac913138f48da79c4542d9583b1c8390..0000000000000000000000000000000000000000
--- a/uc1-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package theodolite.kafkasender;
-
-import java.util.Properties;
-import java.util.function.Function;
-import kieker.common.record.IMonitoringRecord;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-
-
-/**
- * Sends monitoring records to Kafka.
- *
- * @param <T> {@link IMonitoringRecord} to send
- */
-public class KafkaRecordSender<T extends IMonitoringRecord> {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(KafkaRecordSender.class);
-
-  private final String topic;
-
-  private final Function<T, String> keyAccessor;
-
-  private final Function<T, Long> timestampAccessor;
-
-  private final Producer<String, T> producer;
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic) {
-    this(bootstrapServers, topic, x -> "", x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor) {
-    this(bootstrapServers, topic, keyAccessor, x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor) {
-    this(bootstrapServers, topic, keyAccessor, timestampAccessor, new Properties());
-  }
-
-  /**
-   * Create a new {@link KafkaRecordSender}.
-   */
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor,
-      final Properties defaultProperties) {
-    this.topic = topic;
-    this.keyAccessor = keyAccessor;
-    this.timestampAccessor = timestampAccessor;
-
-    final Properties properties = new Properties();
-    properties.putAll(defaultProperties);
-    properties.put("bootstrap.servers", bootstrapServers);
-    // properties.put("acks", this.acknowledges);
-    // properties.put("batch.size", this.batchSize);
-    // properties.put("linger.ms", this.lingerMs);
-    // properties.put("buffer.memory", this.bufferMemory);
-
-    this.producer = new KafkaProducer<>(properties, new StringSerializer(),
-        IMonitoringRecordSerde.serializer());
-  }
-
-  /**
-   * Write the passed monitoring record to Kafka.
-   */
-  public void write(final T monitoringRecord) {
-    final ProducerRecord<String, T> record =
-        new ProducerRecord<>(this.topic, null, this.timestampAccessor.apply(monitoringRecord),
-            this.keyAccessor.apply(monitoringRecord), monitoringRecord);
-
-    LOGGER.debug("Send record to Kafka topic {}: {}", this.topic, record);
-    this.producer.send(record);
-  }
-
-  public void terminate() {
-    this.producer.close();
-  }
-
-}
diff --git a/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java b/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
index bcff74b9a5a4efc72ce1f206f5f10c13557eafd7..a7b27dfdb25760f0b96c930c9705c2eed0402442 100644
--- a/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
+++ b/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java
@@ -1,92 +1,94 @@
 package theodolite.uc1.workloadgenerator;
 
 import java.io.IOException;
-import java.util.List;
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
 import java.util.Objects;
 import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import theodolite.kafkasender.KafkaRecordSender;
-import titan.ccp.models.records.ActivePowerRecord;
+import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGenerator;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGeneratorBuilder;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
+import titan.ccp.model.records.ActivePowerRecord;
 
-public class LoadGenerator {
+/**
+ * Load Generator for UC1.
+ */
+public final class LoadGenerator {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class);
 
-  private static final int WL_MAX_RECORDS = 150_000;
+  private static final long MAX_DURATION_IN_DAYS = 30L;
 
+  private LoadGenerator() {}
+
+  /**
+   * Entry point.
+   */
   public static void main(final String[] args) throws InterruptedException, IOException {
+    // uc1
     LOGGER.info("Start workload generator for use case UC1.");
 
+    // get environment variables
+    final String zooKeeperHost = Objects.requireNonNullElse(System.getenv("ZK_HOST"), "localhost");
+    final int zooKeeperPort =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("ZK_PORT"), "2181"));
     final int numSensors =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10"));
-    final int instanceId = getInstanceId();
     final int periodMs =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-    final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-    final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
+    final double value =
+        Double.parseDouble(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+    final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"),
+        "4"));
     final String kafkaBootstrapServers =
         Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
+    final String schemaRegistryUrl =
+        Objects.requireNonNullElse(System.getenv("SCHEMA_REGISTRY_URL"), "http://localhost:8091");
     final String kafkaInputTopic =
         Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
     final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
     final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
     final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+    final int instances =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("INSTANCES"), "1"));
 
-    final int idStart = instanceId * WL_MAX_RECORDS;
-    final int idEnd = Math.min((instanceId + 1) * WL_MAX_RECORDS, numSensors);
-    LOGGER.info("Generating data for sensors with IDs from {} to {} (exclusive).", idStart, idEnd);
-    final List<String> sensors = IntStream.range(idStart, idEnd)
-        .mapToObj(i -> "s_" + i)
-        .collect(Collectors.toList());
-
+    // create kafka record sender
     final Properties kafkaProperties = new Properties();
     // kafkaProperties.put("acks", this.acknowledges);
     kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
     kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
     kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(
-        kafkaBootstrapServers,
-        kafkaInputTopic,
-        r -> r.getIdentifier(),
-        r -> r.getTimestamp(),
-        kafkaProperties);
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-    final Random random = new Random();
 
-    for (final String sensor : sensors) {
-      final int initialDelay = random.nextInt(periodMs);
-      executor.scheduleAtFixedRate(() -> {
-        kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
-      }, initialDelay, periodMs, TimeUnit.MILLISECONDS);
-    }
+    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender =
+        new KafkaRecordSender.Builder<ActivePowerRecord>(
+            kafkaBootstrapServers,
+            kafkaInputTopic,
+            schemaRegistryUrl)
+                .keyAccessor(r -> r.getIdentifier())
+                .timestampAccessor(r -> r.getTimestamp())
+                .defaultProperties(kafkaProperties)
+                .build();
 
-    System.out.println("Wait for termination...");
-    executor.awaitTermination(30, TimeUnit.DAYS);
-    System.out.println("Will terminate now");
+    // create workload generator
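+    // All INSTANCES pods coordinate via ZooKeeper and partition the key space
+    // of sensor IDs (prefix "s_") among each other.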
+    final KafkaWorkloadGenerator<ActivePowerRecord> workloadGenerator =
+        KafkaWorkloadGeneratorBuilder.<ActivePowerRecord>builder()
+            .instances(instances)
+            .keySpace(new KeySpace("s_", numSensors))
+            .threads(threads)
+            .period(Duration.of(periodMs, ChronoUnit.MILLIS))
+            .duration(Duration.of(MAX_DURATION_IN_DAYS, ChronoUnit.DAYS))
+            .generatorFunction(
+                sensor -> new ActivePowerRecord(sensor, System.currentTimeMillis(), value))
+            .zooKeeper(new ZooKeeper(zooKeeperHost, zooKeeperPort))
+            .kafkaRecordSender(kafkaRecordSender)
+            .build();
 
+    // start
+    workloadGenerator.start();
   }
-
-  private static int getInstanceId() {
-    final String podName = System.getenv("POD_NAME");
-    if (podName == null) {
-      return 0;
-    } else {
-      return Pattern.compile("-")
-          .splitAsStream(podName)
-          .reduce((p, x) -> x)
-          .map(Integer::parseInt)
-          .orElse(0);
-    }
-  }
-
 }
diff --git a/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java b/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
index 06a6d9ccbf6750290335cd7389391eb613b1569a..a193fe134311e656f1010c738675210689e1b9d6 100644
--- a/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
+++ b/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java
@@ -4,8 +4,9 @@ import java.time.Duration;
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
+import theodolite.commons.kafkastreams.ConfigurationKeys;
 import theodolite.uc2.streamprocessing.Uc2KafkaStreamsBuilder;
-import titan.ccp.common.configuration.Configurations;
+import titan.ccp.common.configuration.ServiceConfigurations;
 
 /**
  * A microservice that manages the history and, therefore, stores and aggregates incoming
@@ -14,7 +15,7 @@ import titan.ccp.common.configuration.Configurations;
  */
 public class AggregationService {
 
-  private final Configuration config = Configurations.create();
+  private final Configuration config = ServiceConfigurations.createWithDefaults();
 
   private final CompletableFuture<Void> stopEvent = new CompletableFuture<>();
 
@@ -49,6 +50,7 @@ public class AggregationService {
         .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
         .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
         .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
+        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
         .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
         .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
         .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
index 0555df96c153065ecf9be2bf2ead10de60d55cbf..724c7f6e2eaebc7be53f03b89d143d885c4a055c 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java
@@ -9,7 +9,7 @@ import org.apache.kafka.streams.KeyValue;
 import org.apache.kafka.streams.kstream.Transformer;
 import org.apache.kafka.streams.processor.ProcessorContext;
 import org.apache.kafka.streams.state.KeyValueStore;
-import titan.ccp.models.records.ActivePowerRecord;
+import titan.ccp.model.records.ActivePowerRecord;
 
 /**
  * Transforms the join result of an {@link ActivePowerRecord} and the corresponding sensor parents
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerFactory.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerFactory.java
index b78eec51e1cd9e717f79b075e5e27230af56dbe7..cf4362a21ebd0e7b3bb9c4cad4ca871d0b3f2ea8 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerFactory.java
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerFactory.java
@@ -8,7 +8,7 @@ import org.apache.kafka.streams.kstream.TransformerSupplier;
 import org.apache.kafka.streams.state.KeyValueStore;
 import org.apache.kafka.streams.state.StoreBuilder;
 import org.apache.kafka.streams.state.Stores;
-import titan.ccp.models.records.ActivePowerRecord;
+import titan.ccp.model.records.ActivePowerRecord;
 
 /**
  * Factory class configuration required by {@link JointFlatTransformerFactory}.
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
index 02b7318587a77228e7fb2f7dc1b3350bac532c89..cba05f1ed8e585d5c31aaa92207e0d2854436736 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java
@@ -1,7 +1,8 @@
 package theodolite.uc2.streamprocessing;
 
+import java.util.Objects;
 import java.util.Set;
-import titan.ccp.models.records.ActivePowerRecord;
+import titan.ccp.model.records.ActivePowerRecord;
 
 /**
  * A joined pair of an {@link ActivePowerRecord} and its associated parents. Both the record and the
@@ -26,6 +27,27 @@ public class JointRecordParents {
     return this.record;
   }
 
+  @Override
+  public String toString() {
+    return "{" + this.parents + ", " + this.record + "}";
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(this.parents, this.record);
+  }
 
+  @Override
+  public boolean equals(final Object obj) {
+    if (obj == this) {
+      return true;
+    }
+    if (obj instanceof JointRecordParents) {
+      final JointRecordParents other = (JointRecordParents) obj;
+      return Objects.equals(this.parents, other.parents)
+          && Objects.equals(this.record, other.record);
+    }
+    return false;
+  }
 
 }
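The added equals/hashCode pair gives JointRecordParents value semantics, which matters wherever instances are compared by content, for example in tests of the join logic. A minimal JUnit sketch of the contract; the two-argument constructor and the Set<String> type of parents are assumptions inferred from the fields, not shown in this diff:

import java.util.Set;
import org.junit.Assert;
import org.junit.Test;
import titan.ccp.model.records.ActivePowerRecord;

public class JointRecordParentsEqualsTest {

  @Test
  public void equalContentImpliesEqualHashCodes() {
    final ActivePowerRecord record = new ActivePowerRecord("s_0", 0L, 10.0);
    // Hypothetical constructor (parents, record), matching the fields.
    final JointRecordParents a = new JointRecordParents(Set.of("g_1_0"), record);
    final JointRecordParents b = new JointRecordParents(Set.of("g_1_0"), record);
    Assert.assertEquals(a, b);
    Assert.assertEquals(a.hashCode(), b.hashCode());
  }
}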
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
index 10fb98c9c575bde508a7e24c9e825b25475eff76..9564e994da8fc909147bec76097c737f14247868 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java
@@ -1,8 +1,8 @@
 package theodolite.uc2.streamprocessing;
 
 import org.apache.kafka.streams.kstream.Windowed;
-import titan.ccp.models.records.ActivePowerRecord;
-import titan.ccp.models.records.AggregatedActivePowerRecord;
+import titan.ccp.model.records.ActivePowerRecord;
+import titan.ccp.model.records.AggregatedActivePowerRecord;
 
 /**
  * Updates an {@link AggregatedActivePowerRecord} by a new {@link ActivePowerRecord}.
@@ -19,7 +19,7 @@ public class RecordAggregator {
     final double average = count == 0 ? 0.0 : sum / count;
     return new AggregatedActivePowerRecord(
         identifier.key(), record.getTimestamp(),
-        0.0, 0.0, count, sum, average);
+        count, sum, average);
   }
 
   /**
@@ -32,8 +32,7 @@ public class RecordAggregator {
     final double average = count == 0 ? 0.0 : sum / count;
     return new AggregatedActivePowerRecord(
        // TODO timestamp -1 indicates that this record is emitted by a substract event
-        identifier.key(), -1,
-        0.0, 0.0, count, sum, average);
+        identifier.key(), -1L, count, sum, average);
   }
 
 }
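With the two 0.0 placeholder fields dropped, AggregatedActivePowerRecord now carries exactly (identifier, timestamp, count, sum, average). A worked instance of the arithmetic in add, with illustrative values; the argument types follow the call sites above:

import titan.ccp.model.records.AggregatedActivePowerRecord;

public class RecordAggregatorSketch {
  public static void main(final String[] args) {
    // Two readings, 10 W and 20 W, aggregated for one group:
    final long count = 2;
    final double sum = 10.0 + 20.0;
    final double average = count == 0 ? 0.0 : sum / count; // 15.0
    final AggregatedActivePowerRecord agg = new AggregatedActivePowerRecord(
        "group_lvl_0", 1L, count, sum, average);
    System.out.println(agg);
  }
}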
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
index d65c93034a0fc9a801cf5be0c2f7f50e38d9178e..a4fb5b33966882b94d46c96282bdaaed92d67ebd 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java
@@ -1,5 +1,7 @@
 package theodolite.uc2.streamprocessing;
 
+import java.util.Objects;
+
 /**
  * A key consisting of the identifier of a sensor and an identifier of parent sensor.
  */
@@ -27,4 +29,22 @@ public class SensorParentKey {
     return "{" + this.sensorIdentifier + ", " + this.parentIdentifier + "}";
   }
 
+  @Override
+  public int hashCode() {
+    return Objects.hash(this.sensorIdentifier, this.parentIdentifier);
+  }
+
+  @Override
+  public boolean equals(final Object obj) {
+    if (obj == this) {
+      return true;
+    }
+    if (obj instanceof SensorParentKey) {
+      final SensorParentKey other = (SensorParentKey) obj;
+      return Objects.equals(this.sensorIdentifier, other.sensorIdentifier)
+          && Objects.equals(this.parentIdentifier, other.parentIdentifier);
+    }
+    return false;
+  }
+
 }
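As with JointRecordParents, the new equals/hashCode pair gives SensorParentKey value semantics, so two keys built from the same identifiers address the same entry in hash-based lookups. A small sketch, assuming the two-argument constructor (sensorIdentifier, parentIdentifier) implied by the fields:

import java.util.HashMap;
import java.util.Map;

public class SensorParentKeySketch {
  public static void main(final String[] args) {
    final Map<SensorParentKey, Double> lastValues = new HashMap<>();
    // Hypothetical constructor (sensorIdentifier, parentIdentifier).
    lastValues.put(new SensorParentKey("s_0", "g_1_0"), 10.0);
    // An equal, newly built key hits the same entry:
    System.out.println(lastValues.get(new SensorParentKey("s_0", "g_1_0"))); // 10.0
  }
}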
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
index b6c46fa3a1822cbf1a11e3a8399aa7a061283952..b2dfae12a0bd207b490086d8ca0767d5a6b9cb1d 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java
@@ -18,40 +18,47 @@ import org.apache.kafka.streams.kstream.Suppressed.BufferConfig;
 import org.apache.kafka.streams.kstream.TimeWindows;
 import org.apache.kafka.streams.kstream.Windowed;
 import org.apache.kafka.streams.kstream.WindowedSerdes;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
 import titan.ccp.configuration.events.Event;
 import titan.ccp.configuration.events.EventSerde;
+import titan.ccp.model.records.ActivePowerRecord;
+import titan.ccp.model.records.AggregatedActivePowerRecord;
 import titan.ccp.model.sensorregistry.SensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-import titan.ccp.models.records.ActivePowerRecordFactory;
-import titan.ccp.models.records.AggregatedActivePowerRecord;
-import titan.ccp.models.records.AggregatedActivePowerRecordFactory;
 
 /**
  * Builds Kafka Stream Topology for the History microservice.
  */
 public class TopologyBuilder {
 
-  // private static final Logger LOGGER = LoggerFactory.getLogger(TopologyBuilder.class);
+
+  private static final int LATENCY_OUTPUT_THRESHOLD = 1000;
+  private static final Logger LOGGER = LoggerFactory.getLogger(TopologyBuilder.class);
 
   private final String inputTopic;
   private final String outputTopic;
   private final String configurationTopic;
+  private final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory;
   private final Duration windowSize;
   private final Duration gracePeriod;
 
   private final StreamsBuilder builder = new StreamsBuilder();
   private final RecordAggregator recordAggregator = new RecordAggregator();
 
+  private StatsAccumulator latencyStats = new StatsAccumulator();
+  private long lastTime = System.currentTimeMillis();
 
   /**
    * Create a new {@link TopologyBuilder} using the given topics.
    */
   public TopologyBuilder(final String inputTopic, final String outputTopic,
-      final String configurationTopic, final Duration windowSize, final Duration gracePeriod) {
+      final String configurationTopic, final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory,
+      final Duration windowSize, final Duration gracePeriod) {
     this.inputTopic = inputTopic;
     this.outputTopic = outputTopic;
     this.configurationTopic = configurationTopic;
+    this.srAvroSerdeFactory = srAvroSerdeFactory;
     this.windowSize = windowSize;
     this.gracePeriod = gracePeriod;
   }
@@ -84,11 +91,11 @@ public class TopologyBuilder {
     final KStream<String, ActivePowerRecord> values = this.builder
         .stream(this.inputTopic, Consumed.with(
             Serdes.String(),
-            IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())));
+            this.srAvroSerdeFactory.forValues()));
     final KStream<String, ActivePowerRecord> aggregationsInput = this.builder
         .stream(this.outputTopic, Consumed.with(
             Serdes.String(),
-            IMonitoringRecordSerde.serde(new AggregatedActivePowerRecordFactory())))
+            this.srAvroSerdeFactory.<AggregatedActivePowerRecord>forValues()))
         .mapValues(r -> new ActivePowerRecord(r.getIdentifier(), r.getTimestamp(), r.getSumInW()));
 
     final KTable<String, ActivePowerRecord> inputTable = values
@@ -96,9 +103,9 @@ public class TopologyBuilder {
         .mapValues((k, v) -> new ActivePowerRecord(v.getIdentifier(), System.currentTimeMillis(),
             v.getValueInW()))
         .groupByKey(Grouped.with(Serdes.String(),
-            IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
+            this.srAvroSerdeFactory.forValues()))
         .reduce((aggr, value) -> value, Materialized.with(Serdes.String(),
-            IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())));
+            this.srAvroSerdeFactory.forValues()));
     return inputTable;
   }
 
@@ -140,13 +147,13 @@ public class TopologyBuilder {
             jointFlatMapTransformerFactory.getStoreName())
         .groupByKey(Grouped.with(
             SensorParentKeySerde.serde(),
-            IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
+            this.srAvroSerdeFactory.forValues()))
         .windowedBy(TimeWindows.of(this.windowSize).grace(this.gracePeriod))
         .reduce(
             // TODO Configurable window aggregation function
             (aggValue, newValue) -> newValue,
             Materialized.with(SensorParentKeySerde.serde(),
-                IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())));
+                this.srAvroSerdeFactory.forValues()));
 
   }
 
@@ -159,14 +166,14 @@ public class TopologyBuilder {
                 new WindowedSerdes.TimeWindowedSerde<>(
                     Serdes.String(),
                     this.windowSize.toMillis()),
-                IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
+                this.srAvroSerdeFactory.forValues()))
         .aggregate(
             () -> null, this.recordAggregator::add, this.recordAggregator::substract,
             Materialized.with(
                 new WindowedSerdes.TimeWindowedSerde<>(
                     Serdes.String(),
                     this.windowSize.toMillis()),
-                IMonitoringRecordSerde.serde(new AggregatedActivePowerRecordFactory())))
+                this.srAvroSerdeFactory.forValues()))
         .suppress(Suppressed.untilTimeLimit(this.windowSize, BufferConfig.unbounded()))
         // .suppress(Suppressed.untilWindowCloses(BufferConfig.unbounded()))
         .toStream()
@@ -175,36 +182,35 @@ public class TopologyBuilder {
         .map((k, v) -> KeyValue.pair(k.key(), v)); // TODO compute Timestamp
   }
 
-  private StatsAccumulator latencyStats = new StatsAccumulator();
-  private long lastTime = System.currentTimeMillis();
-
   private void exposeOutputStream(final KStream<String, AggregatedActivePowerRecord> aggregations) {
     aggregations
         .peek((k, v) -> {
           final long time = System.currentTimeMillis();
           final long latency = time - v.getTimestamp();
           this.latencyStats.add(latency);
-          if (time - this.lastTime >= 1000) {
-            System.out.println("latency,"
-                + time + ','
-                + this.latencyStats.mean() + ','
-                + (this.latencyStats.count() > 0
-                    ? this.latencyStats.populationStandardDeviation()
-                    : Double.NaN)
-                + ','
-                + (this.latencyStats.count() > 1
-                    ? this.latencyStats.sampleStandardDeviation()
-                    : Double.NaN)
-                + ','
-                + this.latencyStats.min() + ','
-                + this.latencyStats.max() + ','
-                + this.latencyStats.count());
+          if (time - this.lastTime >= LATENCY_OUTPUT_THRESHOLD) {
+            if (LOGGER.isInfoEnabled()) {
+              LOGGER.info("latency,"
+                  + time + ','
+                  + this.latencyStats.mean() + ','
+                  + (this.latencyStats.count() > 0
+                      ? this.latencyStats.populationStandardDeviation()
+                      : Double.NaN)
+                  + ','
+                  + (this.latencyStats.count() > 1
+                      ? this.latencyStats.sampleStandardDeviation()
+                      : Double.NaN)
+                  + ','
+                  + this.latencyStats.min() + ','
+                  + this.latencyStats.max() + ','
+                  + this.latencyStats.count());
+            }
             this.latencyStats = new StatsAccumulator();
             this.lastTime = time;
           }
         })
         .to(this.outputTopic, Produced.with(
             Serdes.String(),
-            IMonitoringRecordSerde.serde(new AggregatedActivePowerRecordFactory())));
+            this.srAvroSerdeFactory.forValues()));
   }
 }
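The peek above accumulates per-record latencies and flushes one CSV row per LATENCY_OUTPUT_THRESHOLD (1 s) interval. A standalone sketch of the same accumulation with Guava's StatsAccumulator, with an illustrative output row:

import com.google.common.math.StatsAccumulator;

public class LatencyStatsSketch {
  public static void main(final String[] args) {
    final StatsAccumulator stats = new StatsAccumulator();
    stats.add(12); // latencies in ms
    stats.add(18);
    stats.add(15);
    // Row format: latency,<time>,mean,popStdDev,sampleStdDev,min,max,count
    // e.g.: latency,1589000000000,15.0,2.449489742783178,3.0,12.0,18.0,3
    System.out.println("latency," + System.currentTimeMillis() + ','
        + stats.mean() + ',' + stats.populationStandardDeviation() + ','
        + stats.sampleStandardDeviation() + ',' + stats.min() + ','
        + stats.max() + ',' + stats.count());
  }
}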
diff --git a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
index ce7d5e90b476a9d8b8508ea2356f4a2da1d856f3..2f3e5c7e994a3d194810016c4664a5a83c4cc21b 100644
--- a/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
+++ b/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java
@@ -4,6 +4,7 @@ import java.time.Duration;
 import java.util.Objects;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
 
 /**
  * Builder for the Kafka Streams configuration.
@@ -54,6 +55,7 @@ public class Uc2KafkaStreamsBuilder extends KafkaStreamsBuilder { // NOPMD build
         this.inputTopic,
         this.outputTopic,
         this.configurationTopic,
+        new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl),
         this.windowSize == null ? WINDOW_SIZE_DEFAULT : this.windowSize,
         this.gracePeriod == null ? GRACE_PERIOD_DEFAULT : this.gracePeriod);
 
diff --git a/uc2-application/src/main/resources/META-INF/application.properties b/uc2-application/src/main/resources/META-INF/application.properties
index f9a5225680f638239e637e99bf8d65152d15764d..74f47163d0fa02d1e3b582aab53bc8907a7855af 100644
--- a/uc2-application/src/main/resources/META-INF/application.properties
+++ b/uc2-application/src/main/resources/META-INF/application.properties
@@ -8,8 +8,12 @@ configuration.kafka.topic=configuration
 kafka.bootstrap.servers=localhost:9092
 kafka.input.topic=input
 kafka.output.topic=output
+
+schema.registry.url=http://localhost:8091
+
 window.size.ms=1000
 window.grace.ms=0
+
 num.threads=1
 commit.interval.ms=100
 cache.max.bytes.buffering=-1
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java b/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
index 49ed674bc4442f01de1cf51e4510f2079524933d..54e8c460e642d53bb013ef6888570d6fc36ff614 100644
--- a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
+++ b/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java
@@ -3,7 +3,6 @@ package theodolite.uc2.streamprocessing;
 import java.util.Optional;
 import java.util.Set;
 import org.junit.Test;
-import theodolite.uc2.streamprocessing.OptionalParentsSerde;
 
 public class OptionalParentsSerdeTest {
 
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java b/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
index 15872798698ceffcdbaddb689d4179afd7d67a01..f12604d6a19ca36e9c151210005c910b37908307 100644
--- a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
+++ b/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java
@@ -2,7 +2,6 @@ package theodolite.uc2.streamprocessing;
 
 import java.util.Set;
 import org.junit.Test;
-import theodolite.uc2.streamprocessing.ParentsSerde;
 
 public class ParentsSerdeTest {
 
diff --git a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java b/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
index 7d9fe3a6eb83b82d85913f212fe9a930f194b220..7ca99bcb79baeb5f95a8270b99a559f2f108867e 100644
--- a/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
+++ b/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java
@@ -1,8 +1,6 @@
 package theodolite.uc2.streamprocessing;
 
 import org.junit.Test;
-import theodolite.uc2.streamprocessing.SensorParentKey;
-import theodolite.uc2.streamprocessing.SensorParentKeySerde;
 
 public class SensorParentKeySerdeTest {
 
diff --git a/uc2-workload-generator/build.gradle b/uc2-workload-generator/build.gradle
index f2c3e5d2e73b655dffd94222ecfbc4fc31b7f722..b92e0c2edc54786ea957338b9981922f0a6a7b32 100644
--- a/uc2-workload-generator/build.gradle
+++ b/uc2-workload-generator/build.gradle
@@ -1 +1 @@
-mainClassName = "theodolite.uc2.workloadgenerator.LoadGenerator"
+mainClassName = "theodolite.uc2.workloadgenerator.LoadGenerator"
diff --git a/uc2-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java b/uc2-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
deleted file mode 100644
index bf562d86ac913138f48da79c4542d9583b1c8390..0000000000000000000000000000000000000000
--- a/uc2-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package theodolite.kafkasender;
-
-import java.util.Properties;
-import java.util.function.Function;
-import kieker.common.record.IMonitoringRecord;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-
-
-/**
- * Sends monitoring records to Kafka.
- *
- * @param <T> {@link IMonitoringRecord} to send
- */
-public class KafkaRecordSender<T extends IMonitoringRecord> {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(KafkaRecordSender.class);
-
-  private final String topic;
-
-  private final Function<T, String> keyAccessor;
-
-  private final Function<T, Long> timestampAccessor;
-
-  private final Producer<String, T> producer;
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic) {
-    this(bootstrapServers, topic, x -> "", x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor) {
-    this(bootstrapServers, topic, keyAccessor, x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor) {
-    this(bootstrapServers, topic, keyAccessor, timestampAccessor, new Properties());
-  }
-
-  /**
-   * Create a new {@link KafkaRecordSender}.
-   */
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor,
-      final Properties defaultProperties) {
-    this.topic = topic;
-    this.keyAccessor = keyAccessor;
-    this.timestampAccessor = timestampAccessor;
-
-    final Properties properties = new Properties();
-    properties.putAll(defaultProperties);
-    properties.put("bootstrap.servers", bootstrapServers);
-    // properties.put("acks", this.acknowledges);
-    // properties.put("batch.size", this.batchSize);
-    // properties.put("linger.ms", this.lingerMs);
-    // properties.put("buffer.memory", this.bufferMemory);
-
-    this.producer = new KafkaProducer<>(properties, new StringSerializer(),
-        IMonitoringRecordSerde.serializer());
-  }
-
-  /**
-   * Write the passed monitoring record to Kafka.
-   */
-  public void write(final T monitoringRecord) {
-    final ProducerRecord<String, T> record =
-        new ProducerRecord<>(this.topic, null, this.timestampAccessor.apply(monitoringRecord),
-            this.keyAccessor.apply(monitoringRecord), monitoringRecord);
-
-    LOGGER.debug("Send record to Kafka topic {}: {}", this.topic, record);
-    this.producer.send(record);
-  }
-
-  public void terminate() {
-    this.producer.close();
-  }
-
-}
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java b/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
index c8b3a1846254603c8690bf395c24c6d6f9fb2166..ad24e8e4bc8f86b7ed4d5dc2822622f8da22d6d1 100644
--- a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
+++ b/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java
@@ -10,8 +10,14 @@ import org.apache.kafka.common.serialization.StringSerializer;
 import titan.ccp.configuration.events.Event;
 import titan.ccp.configuration.events.EventSerde;
 
+/**
+ * Publishes configurations to a Kafka topic.
+ *
+ */
 public class ConfigPublisher {
 
+  private static final String MEMORY_CONFIG = "134217728"; // 128 MB
+
   private final String topic;
 
   private final Producer<Event, String> producer;
@@ -20,6 +26,13 @@ public class ConfigPublisher {
     this(bootstrapServers, topic, new Properties());
   }
 
+  /**
+   * Creates a new {@link ConfigPublisher} object.
+   *
+   * @param bootstrapServers the Kafka bootstrap servers to connect to.
+   * @param topic the Kafka topic the configuration is written to.
+   * @param defaultProperties additional default properties for the producer.
+   */
   public ConfigPublisher(final String bootstrapServers, final String topic,
       final Properties defaultProperties) {
     this.topic = topic;
@@ -27,13 +40,19 @@ public class ConfigPublisher {
     final Properties properties = new Properties();
     properties.putAll(defaultProperties);
     properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-    properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "134217728"); // 128 MB
-    properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "134217728"); // 128 MB
+    properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, MEMORY_CONFIG);
+    properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, MEMORY_CONFIG);
 
     this.producer =
         new KafkaProducer<>(properties, EventSerde.serializer(), new StringSerializer());
   }
 
+  /**
+   * Publish an event with the given value to the Kafka topic.
+   *
+   * @param event the {@link Event} that occurred.
+   * @param value the configuration value to publish.
+   */
   public void publish(final Event event, final String value) {
     final ProducerRecord<Event, String> record = new ProducerRecord<>(this.topic, event, value);
     try {
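Typical usage, as in the reworked LoadGenerator further below: publish the serialized sensor registry once to the configuration topic, then close the publisher. A minimal sketch; the bootstrap server and topic mirror the defaults used elsewhere in this change:

import titan.ccp.configuration.events.Event;
import titan.ccp.model.sensorregistry.SensorRegistry;

public class ConfigPublisherUsage {
  public static void publishRegistry(final SensorRegistry sensorRegistry) {
    final ConfigPublisher configPublisher =
        new ConfigPublisher("localhost:9092", "configuration");
    configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
    configPublisher.close();
  }
}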
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java b/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
index 823f4f2761cc3c409451c67b7302e3d2f17adbb9..3eb3e8d25b1f1aa6f302673727b8457a744fb503 100644
--- a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
+++ b/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java
@@ -1,121 +1,139 @@
 package theodolite.uc2.workloadgenerator;
 
 import java.io.IOException;
-import java.util.List;
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
 import java.util.Objects;
 import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import theodolite.kafkasender.KafkaRecordSender;
+import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGenerator;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGeneratorBuilder;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
 import titan.ccp.configuration.events.Event;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
+import titan.ccp.model.records.ActivePowerRecord;
+import titan.ccp.model.sensorregistry.SensorRegistry;
 
-public class LoadGenerator {
+/**
+ * The {@code LoadGenerator} generates a workload of active power records and writes it to Kafka.
+ */
+public final class LoadGenerator {
+
+  private static final int SLEEP_PERIOD = 30_000;
 
   private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class);
 
+  // Constants
+  private static final String DEEP = "deep";
+  private static final long MAX_DURATION_IN_DAYS = 30L;
+
+  // Make this a utility class, because all methods are static.
+  private LoadGenerator() {
+    throw new UnsupportedOperationException();
+  }
+
+  /**
+   * Main method.
+   *
+   * @param args CLI arguments
+   * @throws InterruptedException if the generator is interrupted while waiting.
+   * @throws IOException if an I/O error occurs.
+   */
   public static void main(final String[] args) throws InterruptedException, IOException {
+    // uc2
     LOGGER.info("Start workload generator for use case UC2.");
 
-    final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
+    // get environment variables
+    final String hierarchy = System.getenv("HIERARCHY");
+    if (hierarchy != null && hierarchy.equals(DEEP)) {
+      LOGGER.error(
+          "The HIERARCHY parameter is no longer supported. Creating a full hierachy instead.");
+    }
     final int numNestedGroups = Integer
         .parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
-    final int numSensor =
+    final String zooKeeperHost = Objects.requireNonNullElse(System.getenv("ZK_HOST"), "localhost");
+    final int zooKeeperPort =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("ZK_PORT"), "2181"));
+    final int numSensors =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
     final int periodMs =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-    final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+    final double value =
+        Double.parseDouble(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
     final boolean sendRegistry = Boolean
         .parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
     final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
     final String kafkaBootstrapServers =
         Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
             "localhost:9092");
+    final String schemaRegistryUrl =
+        Objects.requireNonNullElse(System.getenv("SCHEMA_REGISTRY_URL"), "http://localhost:8091");
     final String kafkaInputTopic =
         Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
     final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
     final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
     final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+    final int instances =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("INSTANCES"), "1"));
 
-    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-    if (hierarchy.equals("deep")) {
-      MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-      for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-        lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-      }
-      for (int s = 0; s < numSensor; s++) {
-        lastSensor.addChildMachineSensor("sensor_" + s);
-      }
-    } else if (hierarchy.equals("full")) {
-      addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-    } else {
-      throw new IllegalStateException();
-    }
-
-    final List<String> sensors =
-        sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-            .collect(Collectors.toList());
-
-    if (sendRegistry) {
-      final ConfigPublisher configPublisher =
-          new ConfigPublisher(kafkaBootstrapServers, "configuration");
-      configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-      configPublisher.close();
-      System.out.println("Configuration sent.");
-
-      System.out.println("Now wait 30 seconds");
-      Thread.sleep(30_000);
-      System.out.println("And woke up again :)");
-    }
+    // build sensor registry
+    final SensorRegistry sensorRegistry =
+        new SensorRegistryBuilder(numNestedGroups, numSensors).build();
 
+    // create Kafka record sender
     final Properties kafkaProperties = new Properties();
     // kafkaProperties.put("acks", this.acknowledges);
     kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
     kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
     kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender =
-        new KafkaRecordSender<>(kafkaBootstrapServers,
-            kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-    final Random random = new Random();
-
-    for (final String sensor : sensors) {
-      final int initialDelay = random.nextInt(periodMs);
-      executor.scheduleAtFixedRate(() -> {
-        kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
-      }, initialDelay, periodMs, TimeUnit.MILLISECONDS);
-    }
-
-    System.out.println("Wait for termination...");
-    executor.awaitTermination(30, TimeUnit.DAYS);
-    System.out.println("Will terminate now");
 
-  }
-
-  private static int addChildren(final MutableAggregatedSensor parent, final int numChildren,
-      final int lvl,
-      final int maxLvl, int nextId) {
-    for (int c = 0; c < numChildren; c++) {
-      if (lvl == maxLvl) {
-        parent.addChildMachineSensor("s_" + nextId);
-        nextId++;
-      } else {
-        final MutableAggregatedSensor newParent =
-            parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
-        nextId++;
-        nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
-      }
-    }
-    return nextId;
+    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender =
+        new KafkaRecordSender.Builder<ActivePowerRecord>(
+            kafkaBootstrapServers,
+            kafkaInputTopic,
+            schemaRegistryUrl)
+                .keyAccessor(r -> r.getIdentifier())
+                .timestampAccessor(r -> r.getTimestamp())
+                .defaultProperties(kafkaProperties)
+                .build();
+
+    // create workload generator
+    final KafkaWorkloadGenerator<ActivePowerRecord> workloadGenerator =
+        KafkaWorkloadGeneratorBuilder.<ActivePowerRecord>builder()
+            .instances(instances)
+            .keySpace(new KeySpace("s_", sensorRegistry.getMachineSensors().size()))
+            .threads(threads)
+            .period(Duration.of(periodMs, ChronoUnit.MILLIS))
+            .duration(Duration.of(MAX_DURATION_IN_DAYS, ChronoUnit.DAYS))
+            .beforeAction(() -> {
+              if (sendRegistry) {
+                final ConfigPublisher configPublisher =
+                    new ConfigPublisher(kafkaBootstrapServers, "configuration");
+                configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
+                configPublisher.close();
+                LOGGER.info("Configuration sent.");
+
+                LOGGER.info("Now wait 30 seconds");
+                try {
+                  Thread.sleep(SLEEP_PERIOD);
+                } catch (final InterruptedException e) {
+                  // Sleep was interrupted; log the error and continue.
+                  LOGGER.error(e.getMessage(), e);
+                }
+                LOGGER.info("And woke up again :)");
+              }
+            })
+            .generatorFunction(
+                sensor -> new ActivePowerRecord(sensor, System.currentTimeMillis(), value))
+            .zooKeeper(new ZooKeeper(zooKeeperHost, zooKeeperPort))
+            .kafkaRecordSender(kafkaRecordSender)
+            .build();
+
+    // start
+    workloadGenerator.start();
   }
 
 }
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGeneratorExtrem.java b/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGeneratorExtrem.java
deleted file mode 100644
index 1e58541758602cd2b1ea84f3ac3360aa3911425d..0000000000000000000000000000000000000000
--- a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGeneratorExtrem.java
+++ /dev/null
@@ -1,165 +0,0 @@
-package theodolite.uc2.workloadgenerator;
-
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadMXBean;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import theodolite.kafkasender.KafkaRecordSender;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.model.sensorregistry.SensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-
-public class LoadGeneratorExtrem {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
-    final int numNestedGroups =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
-    final int numSensor =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
-    final int value =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-    final boolean sendRegistry =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-    final boolean doNothing =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("DO_NOTHING"), "false"));
-    final int threads =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-    final int producers =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("PRODUCERS"), "1"));
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
-    final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
-    final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
-
-    final SensorRegistry sensorRegistry =
-        buildSensorRegistry(hierarchy, numNestedGroups, numSensor);
-
-    if (sendRegistry) {
-      final ConfigPublisher configPublisher =
-          new ConfigPublisher(kafkaBootstrapServers, "configuration");
-      configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-      configPublisher.close();
-      System.out.println("Configuration sent.");
-
-      System.out.println("Now wait 30 seconds");
-      Thread.sleep(30_000);
-      System.out.println("And woke up again :)");
-    }
-
-    final Properties kafkaProperties = new Properties();
-    // kafkaProperties.put("acks", this.acknowledges);
-    kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
-    kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
-    kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final List<KafkaRecordSender<ActivePowerRecord>> kafkaRecordSenders = Stream
-        .<KafkaRecordSender<ActivePowerRecord>>generate(
-            () -> new KafkaRecordSender<>(
-                kafkaBootstrapServers,
-                kafkaInputTopic,
-                r -> r.getIdentifier(),
-                r -> r.getTimestamp(),
-                kafkaProperties))
-        .limit(producers)
-        .collect(Collectors.toList());
-
-    final List<String> sensors =
-        sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-            .collect(Collectors.toList());
-
-    for (int i = 0; i < threads; i++) {
-      final int threadId = i;
-      new Thread(() -> {
-        while (true) {
-          for (final String sensor : sensors) {
-            if (!doNothing) {
-              kafkaRecordSenders.get(threadId % producers).write(new ActivePowerRecord(
-                  sensor,
-                  System.currentTimeMillis(),
-                  value));
-            }
-          }
-        }
-      }).start();
-    }
-
-    while (true) {
-      printCpuUsagePerThread();
-    }
-
-    // System.out.println("Wait for termination...");
-    // Thread.sleep(30 * 24 * 60 * 60 * 1000L);
-    // System.out.println("Will terminate now");
-  }
-
-  private static void printCpuUsagePerThread() throws InterruptedException {
-    final ThreadMXBean tmxb = ManagementFactory.getThreadMXBean();
-    final List<Thread> threads = new ArrayList<>(Thread.getAllStackTraces().keySet());
-
-    final long start = System.nanoTime();
-    final long[] startCpuTimes = new long[threads.size()];
-    for (int i = 0; i < threads.size(); i++) {
-      final Thread thread = threads.get(i);
-      startCpuTimes[i] = tmxb.getThreadCpuTime(thread.getId());
-    }
-
-    Thread.sleep(5000);
-
-    for (int i = 0; i < threads.size(); i++) {
-      final Thread thread = threads.get(i);
-      final long cpuTime = tmxb.getThreadCpuTime(thread.getId()) - startCpuTimes[i];
-      final long dur = System.nanoTime() - start;
-      final double util = (double) cpuTime / dur;
-      System.out.println(
-          "Thread " + thread.getName() + ": " + String.format(java.util.Locale.US, "%.4f", util));
-    }
-  }
-
-  private static SensorRegistry buildSensorRegistry(final String hierarchy,
-      final int numNestedGroups, final int numSensor) {
-    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-    if (hierarchy.equals("deep")) {
-      MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-      for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-        lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-      }
-      for (int s = 0; s < numSensor; s++) {
-        lastSensor.addChildMachineSensor("sensor_" + s);
-      }
-    } else if (hierarchy.equals("full")) {
-      addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-    } else {
-      throw new IllegalStateException();
-    }
-    return sensorRegistry;
-  }
-
-  private static int addChildren(final MutableAggregatedSensor parent, final int numChildren,
-      final int lvl, final int maxLvl, int nextId) {
-    for (int c = 0; c < numChildren; c++) {
-      if (lvl == maxLvl) {
-        parent.addChildMachineSensor("s_" + nextId);
-        nextId++;
-      } else {
-        final MutableAggregatedSensor newParent =
-            parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
-        nextId++;
-        nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
-      }
-    }
-    return nextId;
-  }
-
-}
diff --git a/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java b/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..7c34ac89471386f4ddd508a304f2197602beab27
--- /dev/null
+++ b/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java
@@ -0,0 +1,51 @@
+package theodolite.uc2.workloadgenerator;
+
+import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
+import titan.ccp.model.sensorregistry.MutableSensorRegistry;
+import titan.ccp.model.sensorregistry.SensorRegistry;
+
+/**
+ * Builder for creating a nested {@link SensorRegistry} with {@code numNestedGroups} levels and
+ * {@code numSensors} children per group.
+ */
+public final class SensorRegistryBuilder {
+
+  private final int numNestedGroups;
+  private final int numSensors;
+
+  public SensorRegistryBuilder(final int numNestedGroups, final int numSensors) {
+    this.numNestedGroups = numNestedGroups;
+    this.numSensors = numSensors;
+  }
+
+  /**
+   * Creates the {@link SensorRegistry}.
+   */
+  public SensorRegistry build() {
+    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
+    this.addChildren(
+        sensorRegistry.getTopLevelSensor(),
+        this.numSensors,
+        1,
+        this.numNestedGroups,
+        0);
+    return sensorRegistry;
+  }
+
+  private int addChildren(final MutableAggregatedSensor parent, final int numChildren,
+      final int lvl, final int maxLvl, final int startId) {
+    int nextId = startId;
+    for (int c = 0; c < numChildren; c++) {
+      if (lvl == maxLvl) {
+        parent.addChildMachineSensor("s_" + nextId);
+        nextId++;
+      } else {
+        final MutableAggregatedSensor newParent =
+            parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
+        nextId = this.addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
+      }
+    }
+    return nextId;
+  }
+
+}
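For example, new SensorRegistryBuilder(2, 2).build() creates two aggregated groups under the top-level sensor with two machine sensors each; group and machine identifiers are drawn depth-first from one shared counter, which the test below confirms for the machine sensors:

import titan.ccp.model.sensorregistry.SensorRegistry;

public class SensorRegistryBuilderSketch {
  public static void main(final String[] args) {
    final SensorRegistry registry = new SensorRegistryBuilder(2, 2).build();
    // Resulting hierarchy:
    // group_lvl_0
    //   g_1_0: s_0, s_1
    //   g_1_2: s_2, s_3
    System.out.println(registry.getMachineSensors().size()); // 4
  }
}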
diff --git a/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java b/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..17b208edac4acafa92b7a75e053e2fe97a9afdb6
--- /dev/null
+++ b/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java
@@ -0,0 +1,46 @@
+package theodolite.uc2.workloadgenerator;
+
+
+import java.util.Collection;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.junit.Assert;
+import org.junit.Test;
+import titan.ccp.model.sensorregistry.AggregatedSensor;
+import titan.ccp.model.sensorregistry.MachineSensor;
+import titan.ccp.model.sensorregistry.Sensor;
+import titan.ccp.model.sensorregistry.SensorRegistry;
+
+public class SensorRegistryBuilderTest {
+
+  @Test
+  public void testStructure() {
+    final SensorRegistry registry = new SensorRegistryBuilder(2, 2).build();
+    final AggregatedSensor root = registry.getTopLevelSensor();
+    final Collection<Sensor> firstLevelSensors = root.getChildren();
+    Assert.assertEquals(2, firstLevelSensors.size());
+    for (final Sensor sensor : firstLevelSensors) {
+      Assert.assertTrue(sensor instanceof AggregatedSensor);
+      final AggregatedSensor aggregatedSensor = (AggregatedSensor) sensor;
+      final Collection<Sensor> secondLevelSensors = aggregatedSensor.getChildren();
+      Assert.assertEquals(2, secondLevelSensors.size());
+      for (final Sensor machineSensor : secondLevelSensors) {
+        Assert.assertTrue(machineSensor instanceof MachineSensor);
+
+      }
+    }
+  }
+
+  @Test
+  public void testMachineSensorNaming() {
+    final SensorRegistry registry = new SensorRegistryBuilder(2, 2).build();
+    final Set<String> machineSensors = registry.getMachineSensors().stream()
+        .map(s -> s.getIdentifier()).collect(Collectors.toSet());
+
+    Assert.assertTrue(machineSensors.contains("s_0"));
+    Assert.assertTrue(machineSensors.contains("s_1"));
+    Assert.assertTrue(machineSensors.contains("s_2"));
+    Assert.assertTrue(machineSensors.contains("s_3"));
+  }
+
+}
diff --git a/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java b/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
index 18aae8c3499643c29901c3ca7461ec707d59c280..b245b1645c9e5ee68df3f108802c9b91d70cf017 100644
--- a/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
+++ b/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java
@@ -5,8 +5,9 @@ import java.util.Objects;
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
+import theodolite.commons.kafkastreams.ConfigurationKeys;
 import theodolite.uc3.streamprocessing.Uc3KafkaStreamsBuilder;
-import titan.ccp.common.configuration.Configurations;
+import titan.ccp.common.configuration.ServiceConfigurations;
 
 /**
  * A microservice that manages the history and, therefore, stores and aggregates incoming
@@ -15,7 +16,7 @@ import titan.ccp.common.configuration.Configurations;
  */
 public class HistoryService {
 
-  private final Configuration config = Configurations.create();
+  private final Configuration config = ServiceConfigurations.createWithDefaults();
 
   private final CompletableFuture<Void> stopEvent = new CompletableFuture<>();
   private final int windowDurationMinutes = Integer
@@ -45,6 +46,7 @@ public class HistoryService {
         .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
         .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
         .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
+        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
         .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
         .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
         .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
diff --git a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java b/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
index 0ad1845f656bcbd11b61c0e0affa9b6bcfabd2f7..74eed74c52a78df229c02542bc6e66d7f796c2c7 100644
--- a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
+++ b/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java
@@ -14,8 +14,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import theodolite.uc3.streamprocessing.util.StatsFactory;
 import titan.ccp.common.kafka.GenericSerde;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-import titan.ccp.models.records.ActivePowerRecordFactory;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
+import titan.ccp.model.records.ActivePowerRecord;
 
 /**
  * Builds Kafka Stream Topology for the History microservice.
@@ -26,6 +26,7 @@ public class TopologyBuilder {
 
   private final String inputTopic;
   private final String outputTopic;
+  private final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory;
   private final Duration duration;
 
   private final StreamsBuilder builder = new StreamsBuilder();
@@ -34,9 +35,11 @@ public class TopologyBuilder {
    * Create a new {@link TopologyBuilder} using the given topics.
    */
   public TopologyBuilder(final String inputTopic, final String outputTopic,
+      final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory,
       final Duration duration) {
     this.inputTopic = inputTopic;
     this.outputTopic = outputTopic;
+    this.srAvroSerdeFactory = srAvroSerdeFactory;
     this.duration = duration;
   }
 
@@ -47,7 +50,7 @@ public class TopologyBuilder {
     this.builder
         .stream(this.inputTopic,
             Consumed.with(Serdes.String(),
-                IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
+                this.srAvroSerdeFactory.<ActivePowerRecord>forValues()))
         .groupByKey()
         .windowedBy(TimeWindows.of(this.duration))
         // .aggregate(
@@ -62,7 +65,7 @@ public class TopologyBuilder {
                 GenericSerde.from(Stats::toByteArray, Stats::fromByteArray)))
         .toStream()
         .map((k, s) -> KeyValue.pair(k.key(), s.toString()))
-        .peek((k, v) -> System.out.println(k + ": " + v))
+        .peek((k, v) -> LOGGER.info("{}: {}", k, v))
         .to(this.outputTopic, Produced.with(Serdes.String(), Serdes.String()));
 
     return this.builder.build();
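The window aggregate above is (de)serialized with GenericSerde.from(Stats::toByteArray, Stats::fromByteArray); both methods belong to Guava's Stats. A minimal round-trip sketch:

import com.google.common.math.Stats;

public class StatsSerdeSketch {
  public static void main(final String[] args) {
    final Stats stats = Stats.of(1.0, 2.0, 3.0);
    final byte[] bytes = stats.toByteArray(); // serialize
    final Stats restored = Stats.fromByteArray(bytes); // deserialize
    System.out.println(restored.mean()); // 2.0
  }
}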
diff --git a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java b/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
index 63841361b06bb054fee203a894fba0c11c249d16..e74adf7c87673cc0e6ea4004dbcb1c0a6fc907ac 100644
--- a/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
+++ b/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java
@@ -4,6 +4,7 @@ import java.time.Duration;
 import java.util.Objects;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
 
 /**
  * Builder for the Kafka Streams configuration.
@@ -36,7 +37,7 @@ public class Uc3KafkaStreamsBuilder extends KafkaStreamsBuilder {
     Objects.requireNonNull(this.windowDuration, "Window duration has not been set.");
 
     final TopologyBuilder topologyBuilder = new TopologyBuilder(this.inputTopic, this.outputTopic,
-        this.windowDuration);
+        new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl), this.windowDuration);
     return topologyBuilder.build();
   }
 
diff --git a/uc3-application/src/main/resources/META-INF/application.properties b/uc3-application/src/main/resources/META-INF/application.properties
index 96e2d8b6ff46f3b3ce878b1fec011e9315e118bc..2ceaf37224b0bff54b09beaabe29210216e11671 100644
--- a/uc3-application/src/main/resources/META-INF/application.properties
+++ b/uc3-application/src/main/resources/META-INF/application.properties
@@ -4,6 +4,10 @@ application.version=0.0.1
 kafka.bootstrap.servers=localhost:9092
 kafka.input.topic=input
 kafka.output.topic=output
+kafka.window.duration.minutes=1
+
+schema.registry.url=http://localhost:8091
+
 num.threads=1
 commit.interval.ms=100
 cache.max.bytes.buffering=-1
diff --git a/uc3-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java b/uc3-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
deleted file mode 100644
index bf562d86ac913138f48da79c4542d9583b1c8390..0000000000000000000000000000000000000000
--- a/uc3-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package theodolite.kafkasender;
-
-import java.util.Properties;
-import java.util.function.Function;
-import kieker.common.record.IMonitoringRecord;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-
-
-/**
- * Sends monitoring records to Kafka.
- *
- * @param <T> {@link IMonitoringRecord} to send
- */
-public class KafkaRecordSender<T extends IMonitoringRecord> {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(KafkaRecordSender.class);
-
-  private final String topic;
-
-  private final Function<T, String> keyAccessor;
-
-  private final Function<T, Long> timestampAccessor;
-
-  private final Producer<String, T> producer;
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic) {
-    this(bootstrapServers, topic, x -> "", x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor) {
-    this(bootstrapServers, topic, keyAccessor, x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor) {
-    this(bootstrapServers, topic, keyAccessor, timestampAccessor, new Properties());
-  }
-
-  /**
-   * Create a new {@link KafkaRecordSender}.
-   */
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor,
-      final Properties defaultProperties) {
-    this.topic = topic;
-    this.keyAccessor = keyAccessor;
-    this.timestampAccessor = timestampAccessor;
-
-    final Properties properties = new Properties();
-    properties.putAll(defaultProperties);
-    properties.put("bootstrap.servers", bootstrapServers);
-    // properties.put("acks", this.acknowledges);
-    // properties.put("batch.size", this.batchSize);
-    // properties.put("linger.ms", this.lingerMs);
-    // properties.put("buffer.memory", this.bufferMemory);
-
-    this.producer = new KafkaProducer<>(properties, new StringSerializer(),
-        IMonitoringRecordSerde.serializer());
-  }
-
-  /**
-   * Write the passed monitoring record to Kafka.
-   */
-  public void write(final T monitoringRecord) {
-    final ProducerRecord<String, T> record =
-        new ProducerRecord<>(this.topic, null, this.timestampAccessor.apply(monitoringRecord),
-            this.keyAccessor.apply(monitoringRecord), monitoringRecord);
-
-    LOGGER.debug("Send record to Kafka topic {}: {}", this.topic, record);
-    this.producer.send(record);
-  }
-
-  public void terminate() {
-    this.producer.close();
-  }
-
-}
diff --git a/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java b/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
index a063ea359571d67fe118ec2f0951664e62624d98..85f6a94036c53b48973ba2200212fc8e5dfd663d 100644
--- a/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
+++ b/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java
@@ -1,92 +1,102 @@
 package theodolite.uc3.workloadgenerator;
 
 import java.io.IOException;
-import java.util.List;
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
 import java.util.Objects;
 import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import theodolite.kafkasender.KafkaRecordSender;
-import titan.ccp.models.records.ActivePowerRecord;
+import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGenerator;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGeneratorBuilder;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
+import titan.ccp.model.records.ActivePowerRecord;
 
-public class LoadGenerator {
+/**
+ * The {@code LoadGenerator} generates load for Kafka.
+ */
+public final class LoadGenerator {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class);
 
-  private static final int WL_MAX_RECORDS = 150_000;
+  // constants
+  private static final long MAX_DURATION_IN_DAYS = 30L;
 
+  // Make this a utility class, because all methods are static.
+  private LoadGenerator() {
+    throw new UnsupportedOperationException();
+  }
+
+  /**
+   * Main method.
+   *
+   * @param args CLI arguments
+   * @throws InterruptedException if the workload generation is interrupted
+   * @throws IOException if an I/O error occurs
+   */
   public static void main(final String[] args) throws InterruptedException, IOException {
+    // uc3
     LOGGER.info("Start workload generator for use case UC3.");
 
+    // get environment variables
+    final String zooKeeperHost = Objects.requireNonNullElse(System.getenv("ZK_HOST"), "localhost");
+    final int zooKeeperPort =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("ZK_PORT"), "2181"));
     final int numSensors =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10"));
-    final int instanceId = getInstanceId();
     final int periodMs =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-    final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+    final double value =
+        Double.parseDouble(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
     final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
     final String kafkaBootstrapServers =
         Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
             "localhost:9092");
+    final String schemaRegistryUrl =
+        Objects.requireNonNullElse(System.getenv("SCHEMA_REGISTRY_URL"), "http://localhost:8091");
     final String kafkaInputTopic =
         Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
     final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
     final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
     final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+    final int instances =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("INSTANCES"), "1"));
 
-    final int idStart = instanceId * WL_MAX_RECORDS;
-    final int idEnd = Math.min((instanceId + 1) * WL_MAX_RECORDS, numSensors);
-    LOGGER.info("Generating data for sensors with IDs from {} to {} (exclusive).", idStart, idEnd);
-    final List<String> sensors = IntStream.range(idStart, idEnd)
-        .mapToObj(i -> "s_" + i)
-        .collect(Collectors.toList());
-
+    // create kafka record sender
     final Properties kafkaProperties = new Properties();
     // kafkaProperties.put("acks", this.acknowledges);
     kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
     kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
     kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
     final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender =
-        new KafkaRecordSender<>(kafkaBootstrapServers,
-            kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-    final Random random = new Random();
+        new KafkaRecordSender.Builder<ActivePowerRecord>(
+            kafkaBootstrapServers,
+            kafkaInputTopic,
+            schemaRegistryUrl)
+                .keyAccessor(r -> r.getIdentifier())
+                .timestampAccessor(r -> r.getTimestamp())
+                .defaultProperties(kafkaProperties)
+                .build();
 
-    LOGGER.info("Start setting up sensors.");
-    for (final String sensor : sensors) {
-      final int initialDelay = random.nextInt(periodMs);
-      executor.scheduleAtFixedRate(() -> {
-        kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
-      }, initialDelay, periodMs, TimeUnit.MILLISECONDS);
-    }
-    LOGGER.info("Finished setting up sensors.");
+    // create workload generator
+    final KafkaWorkloadGenerator<ActivePowerRecord> workloadGenerator =
+        KafkaWorkloadGeneratorBuilder.<ActivePowerRecord>builder()
+            .instances(instances)
+            .keySpace(new KeySpace("s_", numSensors))
+            .threads(threads)
+            .period(Duration.of(periodMs, ChronoUnit.MILLIS))
+            .duration(Duration.of(MAX_DURATION_IN_DAYS, ChronoUnit.DAYS))
+            .generatorFunction(
+                sensor -> new ActivePowerRecord(sensor, System.currentTimeMillis(), value))
+            .zooKeeper(new ZooKeeper(zooKeeperHost, zooKeeperPort))
+            .kafkaRecordSender(kafkaRecordSender)
+            .build();
 
-    System.out.println("Wait for termination...");
-    executor.awaitTermination(30, TimeUnit.DAYS);
-    System.out.println("Will terminate now");
+    // start
+    workloadGenerator.start();
 
   }
-
-  private static int getInstanceId() {
-    final String podName = System.getenv("POD_NAME");
-    if (podName == null) {
-      return 0;
-    } else {
-      return Pattern.compile("-")
-          .splitAsStream(podName)
-          .reduce((p, x) -> x)
-          .map(Integer::parseInt)
-          .orElse(0);
-    }
-  }
-
 }
diff --git a/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java b/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
index 3e3073fdeed682ae09e345d9f315585e960a3440..23af805733de2bb3f6384fa924a2322490ee58d9 100644
--- a/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
+++ b/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java
@@ -4,8 +4,9 @@ import java.time.Duration;
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
+import theodolite.commons.kafkastreams.ConfigurationKeys;
 import theodolite.uc4.streamprocessing.Uc4KafkaStreamsBuilder;
-import titan.ccp.common.configuration.Configurations;
+import titan.ccp.common.configuration.ServiceConfigurations;
 
 /**
  * A microservice that manages the history and, therefore, stores and aggregates incoming
@@ -14,7 +15,7 @@ import titan.ccp.common.configuration.Configurations;
  */
 public class HistoryService {
 
-  private final Configuration config = Configurations.create();
+  private final Configuration config = ServiceConfigurations.createWithDefaults();
 
   private final CompletableFuture<Void> stopEvent = new CompletableFuture<>();
 
@@ -45,6 +46,7 @@ public class HistoryService {
         .applicationName(this.config.getString(ConfigurationKeys.APPLICATION_NAME))
         .applicationVersion(this.config.getString(ConfigurationKeys.APPLICATION_VERSION))
         .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
+        .schemaRegistry(this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL))
         .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
         .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
         .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java b/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
index b4632aaf15ee5f2572c795458f4bfded5c8cfbcd..a92abae6e11c4bf66a5d8d8dee0f10b088e8274b 100644
--- a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
+++ b/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java
@@ -17,8 +17,8 @@ import org.apache.kafka.streams.kstream.Produced;
 import org.apache.kafka.streams.kstream.TimeWindows;
 import theodolite.uc4.streamprocessing.util.StatsFactory;
 import titan.ccp.common.kafka.GenericSerde;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-import titan.ccp.models.records.ActivePowerRecordFactory;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
+import titan.ccp.model.records.ActivePowerRecord;
 
 /**
  * Builds Kafka Stream Topology for the History microservice.
@@ -32,6 +32,7 @@ public class TopologyBuilder {
 
   private final String inputTopic;
   private final String outputTopic;
+  private final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory;
   private final Duration aggregtionDuration;
   private final Duration aggregationAdvance;
 
@@ -41,9 +42,11 @@ public class TopologyBuilder {
    * Create a new {@link TopologyBuilder} using the given topics.
    */
   public TopologyBuilder(final String inputTopic, final String outputTopic,
+      final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory,
       final Duration aggregtionDuration, final Duration aggregationAdvance) {
     this.inputTopic = inputTopic;
     this.outputTopic = outputTopic;
+    this.srAvroSerdeFactory = srAvroSerdeFactory;
     this.aggregtionDuration = aggregtionDuration;
     this.aggregationAdvance = aggregationAdvance;
   }
@@ -58,14 +61,14 @@ public class TopologyBuilder {
     this.builder
         .stream(this.inputTopic,
             Consumed.with(Serdes.String(),
-                IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
+                this.srAvroSerdeFactory.<ActivePowerRecord>forValues()))
         .selectKey((key, value) -> {
           final Instant instant = Instant.ofEpochMilli(value.getTimestamp());
           final LocalDateTime dateTime = LocalDateTime.ofInstant(instant, this.zone);
           return keyFactory.createKey(value.getIdentifier(), dateTime);
         })
         .groupByKey(
-            Grouped.with(keySerde, IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
+            Grouped.with(keySerde, this.srAvroSerdeFactory.forValues()))
         .windowedBy(TimeWindows.of(this.aggregtionDuration).advanceBy(this.aggregationAdvance))
         .aggregate(
             () -> Stats.of(),
diff --git a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java b/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
index 8220f4cd36b0639cd69ac102177a53b1ed90e5b6..7c9e2c4f790cf1fbb7dd34db573576d1e64077db 100644
--- a/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
+++ b/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java
@@ -4,6 +4,7 @@ import java.time.Duration;
 import java.util.Objects;
 import org.apache.kafka.streams.Topology;
 import theodolite.commons.kafkastreams.KafkaStreamsBuilder;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
 
 /**
  * Builder for the Kafka Streams configuration.
@@ -45,6 +46,7 @@ public class Uc4KafkaStreamsBuilder extends KafkaStreamsBuilder {
     final TopologyBuilder topologyBuilder = new TopologyBuilder(
         this.inputTopic,
         this.outputTopic,
+        new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl),
         this.aggregtionDuration,
         this.aggregationAdvance);
 
diff --git a/uc4-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java b/uc4-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
deleted file mode 100644
index bf562d86ac913138f48da79c4542d9583b1c8390..0000000000000000000000000000000000000000
--- a/uc4-workload-generator/src/main/java/theodolite/kafkasender/KafkaRecordSender.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package theodolite.kafkasender;
-
-import java.util.Properties;
-import java.util.function.Function;
-import kieker.common.record.IMonitoringRecord;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-
-
-/**
- * Sends monitoring records to Kafka.
- *
- * @param <T> {@link IMonitoringRecord} to send
- */
-public class KafkaRecordSender<T extends IMonitoringRecord> {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(KafkaRecordSender.class);
-
-  private final String topic;
-
-  private final Function<T, String> keyAccessor;
-
-  private final Function<T, Long> timestampAccessor;
-
-  private final Producer<String, T> producer;
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic) {
-    this(bootstrapServers, topic, x -> "", x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor) {
-    this(bootstrapServers, topic, keyAccessor, x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor) {
-    this(bootstrapServers, topic, keyAccessor, timestampAccessor, new Properties());
-  }
-
-  /**
-   * Create a new {@link KafkaRecordSender}.
-   */
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor,
-      final Properties defaultProperties) {
-    this.topic = topic;
-    this.keyAccessor = keyAccessor;
-    this.timestampAccessor = timestampAccessor;
-
-    final Properties properties = new Properties();
-    properties.putAll(defaultProperties);
-    properties.put("bootstrap.servers", bootstrapServers);
-    // properties.put("acks", this.acknowledges);
-    // properties.put("batch.size", this.batchSize);
-    // properties.put("linger.ms", this.lingerMs);
-    // properties.put("buffer.memory", this.bufferMemory);
-
-    this.producer = new KafkaProducer<>(properties, new StringSerializer(),
-        IMonitoringRecordSerde.serializer());
-  }
-
-  /**
-   * Write the passed monitoring record to Kafka.
-   */
-  public void write(final T monitoringRecord) {
-    final ProducerRecord<String, T> record =
-        new ProducerRecord<>(this.topic, null, this.timestampAccessor.apply(monitoringRecord),
-            this.keyAccessor.apply(monitoringRecord), monitoringRecord);
-
-    LOGGER.debug("Send record to Kafka topic {}: {}", this.topic, record);
-    this.producer.send(record);
-  }
-
-  public void terminate() {
-    this.producer.close();
-  }
-
-}
diff --git a/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java b/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
index 90d28aafb86b2b5da050d0110d425b5ec1ffe5e6..ff551e7ef423633137d122dfed7d6e03d362e7ff 100644
--- a/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
+++ b/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java
@@ -1,70 +1,103 @@
 package theodolite.uc4.workloadgenerator;
 
 import java.io.IOException;
-import java.util.List;
+import java.time.Duration;
+import java.time.temporal.ChronoUnit;
 import java.util.Objects;
 import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import theodolite.kafkasender.KafkaRecordSender;
-import titan.ccp.models.records.ActivePowerRecord;
+import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGenerator;
+import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGeneratorBuilder;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
+import titan.ccp.model.records.ActivePowerRecord;
 
-public class LoadGenerator {
+/**
+ * The {@code LoadGenerator} generates load for Kafka.
+ */
+public final class LoadGenerator {
 
   private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class);
 
+  // constants
+  private static final long MAX_DURATION_IN_DAYS = 30L;
+
+  // Make this a utility class, because all methods are static.
+  private LoadGenerator() {
+    throw new UnsupportedOperationException();
+  }
+
+  /**
+   * Main method.
+   *
+   * @param args CLI arguments
+   * @throws InterruptedException if the workload generation is interrupted
+   * @throws IOException if an I/O error occurs
+   */
   public static void main(final String[] args) throws InterruptedException, IOException {
     // uc4
     LOGGER.info("Start workload generator for use case UC4.");
 
-    final int numSensor =
+    // get environment variables
+    final String zooKeeperHost = Objects.requireNonNullElse(System.getenv("ZK_HOST"), "localhost");
+    final int zooKeeperPort =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("ZK_PORT"), "2181"));
+    final int numSensors =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10"));
     final int periodMs =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-    final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+    final double value =
+        Double.parseDouble(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
     final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "1"));
     final String kafkaBootstrapServers =
         Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
             "localhost:9092");
+    final String schemaRegistryUrl =
+        Objects.requireNonNullElse(System.getenv("SCHEMA_REGISTRY_URL"), "http://localhost:8091");
     final String kafkaInputTopic =
         Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
     final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
     final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
     final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+    final int instances =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("INSTANCES"), "1"));
 
+    // create kafka record sender
     final Properties kafkaProperties = new Properties();
     // kafkaProperties.put("acks", this.acknowledges);
     kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
     kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
     kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender =
-        new KafkaRecordSender<>(kafkaBootstrapServers,
-            kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
 
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-    final Random random = new Random();
-
-    final List<String> sensors =
-        IntStream.range(0, numSensor).mapToObj(i -> "s_" + i).collect(Collectors.toList());
-
-    for (final String sensor : sensors) {
-      final int initialDelay = random.nextInt(periodMs);
-      executor.scheduleAtFixedRate(() -> {
-        kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
-      }, initialDelay, periodMs, TimeUnit.MILLISECONDS);
-    }
+    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender =
+        new KafkaRecordSender.Builder<ActivePowerRecord>(
+            kafkaBootstrapServers,
+            kafkaInputTopic,
+            schemaRegistryUrl)
+                .keyAccessor(r -> r.getIdentifier())
+                .timestampAccessor(r -> r.getTimestamp())
+                .defaultProperties(kafkaProperties)
+                .build();
 
-    System.out.println("Wait for termination...");
-    executor.awaitTermination(30, TimeUnit.DAYS);
-    System.out.println("Will terminate now");
+    // create workload generator
+    final KafkaWorkloadGenerator<ActivePowerRecord> workloadGenerator =
+        KafkaWorkloadGeneratorBuilder.<ActivePowerRecord>builder()
+            .instances(instances)
+            .keySpace(new KeySpace("s_", numSensors))
+            .threads(threads)
+            .period(Duration.of(periodMs, ChronoUnit.MILLIS))
+            .duration(Duration.of(MAX_DURATION_IN_DAYS, ChronoUnit.DAYS))
+            .generatorFunction(
+                sensor -> new ActivePowerRecord(sensor, System.currentTimeMillis(), value))
+            .zooKeeper(new ZooKeeper(zooKeeperHost, zooKeeperPort))
+            .kafkaRecordSender(kafkaRecordSender)
+            .build();
 
+    // start
+    workloadGenerator.start();
   }
 
 }
diff --git a/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs b/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..fa98ca63d77bdee891150bd6713f70197a75cefc
--- /dev/null
+++ b/workload-generator-commons/.settings/org.eclipse.jdt.ui.prefs
@@ -0,0 +1,127 @@
+cleanup.add_default_serial_version_id=true
+cleanup.add_generated_serial_version_id=false
+cleanup.add_missing_annotations=true
+cleanup.add_missing_deprecated_annotations=true
+cleanup.add_missing_methods=false
+cleanup.add_missing_nls_tags=false
+cleanup.add_missing_override_annotations=true
+cleanup.add_missing_override_annotations_interface_methods=true
+cleanup.add_serial_version_id=false
+cleanup.always_use_blocks=true
+cleanup.always_use_parentheses_in_expressions=false
+cleanup.always_use_this_for_non_static_field_access=true
+cleanup.always_use_this_for_non_static_method_access=true
+cleanup.convert_functional_interfaces=false
+cleanup.convert_to_enhanced_for_loop=true
+cleanup.correct_indentation=true
+cleanup.format_source_code=true
+cleanup.format_source_code_changes_only=false
+cleanup.insert_inferred_type_arguments=false
+cleanup.make_local_variable_final=true
+cleanup.make_parameters_final=true
+cleanup.make_private_fields_final=true
+cleanup.make_type_abstract_if_missing_method=false
+cleanup.make_variable_declarations_final=true
+cleanup.never_use_blocks=false
+cleanup.never_use_parentheses_in_expressions=true
+cleanup.organize_imports=true
+cleanup.qualify_static_field_accesses_with_declaring_class=false
+cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+cleanup.qualify_static_member_accesses_with_declaring_class=true
+cleanup.qualify_static_method_accesses_with_declaring_class=false
+cleanup.remove_private_constructors=true
+cleanup.remove_redundant_modifiers=false
+cleanup.remove_redundant_semicolons=true
+cleanup.remove_redundant_type_arguments=true
+cleanup.remove_trailing_whitespaces=true
+cleanup.remove_trailing_whitespaces_all=true
+cleanup.remove_trailing_whitespaces_ignore_empty=false
+cleanup.remove_unnecessary_casts=true
+cleanup.remove_unnecessary_nls_tags=true
+cleanup.remove_unused_imports=true
+cleanup.remove_unused_local_variables=false
+cleanup.remove_unused_private_fields=true
+cleanup.remove_unused_private_members=false
+cleanup.remove_unused_private_methods=true
+cleanup.remove_unused_private_types=true
+cleanup.sort_members=false
+cleanup.sort_members_all=false
+cleanup.use_anonymous_class_creation=false
+cleanup.use_blocks=true
+cleanup.use_blocks_only_for_return_and_throw=false
+cleanup.use_lambda=true
+cleanup.use_parentheses_in_expressions=true
+cleanup.use_this_for_non_static_field_access=true
+cleanup.use_this_for_non_static_field_access_only_if_necessary=false
+cleanup.use_this_for_non_static_method_access=true
+cleanup.use_this_for_non_static_method_access_only_if_necessary=false
+cleanup_profile=_CAU-SE-Style
+cleanup_settings_version=2
+eclipse.preferences.version=1
+editor_save_participant_org.eclipse.jdt.ui.postsavelistener.cleanup=true
+formatter_profile=_CAU-SE-Style
+formatter_settings_version=15
+org.eclipse.jdt.ui.ignorelowercasenames=true
+org.eclipse.jdt.ui.importorder=;
+org.eclipse.jdt.ui.ondemandthreshold=99
+org.eclipse.jdt.ui.staticondemandthreshold=99
+sp_cleanup.add_default_serial_version_id=true
+sp_cleanup.add_generated_serial_version_id=false
+sp_cleanup.add_missing_annotations=true
+sp_cleanup.add_missing_deprecated_annotations=true
+sp_cleanup.add_missing_methods=false
+sp_cleanup.add_missing_nls_tags=false
+sp_cleanup.add_missing_override_annotations=true
+sp_cleanup.add_missing_override_annotations_interface_methods=true
+sp_cleanup.add_serial_version_id=false
+sp_cleanup.always_use_blocks=true
+sp_cleanup.always_use_parentheses_in_expressions=false
+sp_cleanup.always_use_this_for_non_static_field_access=true
+sp_cleanup.always_use_this_for_non_static_method_access=true
+sp_cleanup.convert_functional_interfaces=false
+sp_cleanup.convert_to_enhanced_for_loop=true
+sp_cleanup.correct_indentation=true
+sp_cleanup.format_source_code=true
+sp_cleanup.format_source_code_changes_only=false
+sp_cleanup.insert_inferred_type_arguments=false
+sp_cleanup.make_local_variable_final=true
+sp_cleanup.make_parameters_final=true
+sp_cleanup.make_private_fields_final=true
+sp_cleanup.make_type_abstract_if_missing_method=false
+sp_cleanup.make_variable_declarations_final=true
+sp_cleanup.never_use_blocks=false
+sp_cleanup.never_use_parentheses_in_expressions=true
+sp_cleanup.on_save_use_additional_actions=true
+sp_cleanup.organize_imports=true
+sp_cleanup.qualify_static_field_accesses_with_declaring_class=false
+sp_cleanup.qualify_static_member_accesses_through_instances_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_through_subtypes_with_declaring_class=true
+sp_cleanup.qualify_static_member_accesses_with_declaring_class=true
+sp_cleanup.qualify_static_method_accesses_with_declaring_class=false
+sp_cleanup.remove_private_constructors=true
+sp_cleanup.remove_redundant_modifiers=false
+sp_cleanup.remove_redundant_semicolons=true
+sp_cleanup.remove_redundant_type_arguments=true
+sp_cleanup.remove_trailing_whitespaces=true
+sp_cleanup.remove_trailing_whitespaces_all=true
+sp_cleanup.remove_trailing_whitespaces_ignore_empty=false
+sp_cleanup.remove_unnecessary_casts=true
+sp_cleanup.remove_unnecessary_nls_tags=true
+sp_cleanup.remove_unused_imports=true
+sp_cleanup.remove_unused_local_variables=false
+sp_cleanup.remove_unused_private_fields=true
+sp_cleanup.remove_unused_private_members=false
+sp_cleanup.remove_unused_private_methods=true
+sp_cleanup.remove_unused_private_types=true
+sp_cleanup.sort_members=false
+sp_cleanup.sort_members_all=false
+sp_cleanup.use_anonymous_class_creation=false
+sp_cleanup.use_blocks=true
+sp_cleanup.use_blocks_only_for_return_and_throw=false
+sp_cleanup.use_lambda=true
+sp_cleanup.use_parentheses_in_expressions=true
+sp_cleanup.use_this_for_non_static_field_access=true
+sp_cleanup.use_this_for_non_static_field_access_only_if_necessary=false
+sp_cleanup.use_this_for_non_static_method_access=true
+sp_cleanup.use_this_for_non_static_method_access_only_if_necessary=false
diff --git a/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs b/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..87860c815222845c1d264d7d0ce498d3397f8280
--- /dev/null
+++ b/workload-generator-commons/.settings/qa.eclipse.plugin.checkstyle.prefs
@@ -0,0 +1,4 @@
+configFilePath=../config/checkstyle.xml
+customModulesJarPaths=
+eclipse.preferences.version=1
+enabled=true
diff --git a/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs b/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
new file mode 100644
index 0000000000000000000000000000000000000000..efbcb8c9e5d449194a48ca1ea42b7d807b573db9
--- /dev/null
+++ b/workload-generator-commons/.settings/qa.eclipse.plugin.pmd.prefs
@@ -0,0 +1,4 @@
+customRulesJars=
+eclipse.preferences.version=1
+enabled=true
+ruleSetFilePath=../config/pmd.xml
diff --git a/workload-generator-commons/build.gradle b/workload-generator-commons/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..eef987cd444c3b6c3d8a532c8d192e94311176db
--- /dev/null
+++ b/workload-generator-commons/build.gradle
@@ -0,0 +1,3 @@
+dependencies {
+    implementation 'org.apache.curator:curator-recipes:4.3.0'
+}
\ No newline at end of file
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java
new file mode 100644
index 0000000000000000000000000000000000000000..33818b51084ce33a564d6f30cefb26b481d0a859
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java
@@ -0,0 +1,123 @@
+package theodolite.commons.workloadgeneration.communication.kafka;
+
+import java.util.Properties;
+import java.util.function.Function;
+import org.apache.avro.specific.SpecificRecord;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.serialization.StringSerializer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import theodolite.commons.workloadgeneration.functions.Transport;
+import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory;
+
+/**
+ * Sends monitoring records to Kafka.
+ *
+ * @param <T> {@link SpecificRecord} to send
+ */
+public class KafkaRecordSender<T extends SpecificRecord> implements Transport<T> {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(KafkaRecordSender.class);
+
+  private final String topic;
+
+  private final Function<T, String> keyAccessor;
+
+  private final Function<T, Long> timestampAccessor;
+
+  private final Producer<String, T> producer;
+
+  /**
+   * Create a new {@link KafkaRecordSender}.
+   */
+  private KafkaRecordSender(final Builder<T> builder) {
+    this.topic = builder.topic;
+    this.keyAccessor = builder.keyAccessor;
+    this.timestampAccessor = builder.timestampAccessor;
+
+    final Properties properties = new Properties();
+    properties.putAll(builder.defaultProperties);
+    properties.put("bootstrap.servers", builder.bootstrapServers);
+    // properties.put("acks", this.acknowledges);
+    // properties.put("batch.size", this.batchSize);
+    // properties.put("linger.ms", this.lingerMs);
+    // properties.put("buffer.memory", this.bufferMemory);
+
+    final SchemaRegistryAvroSerdeFactory avroSerdeFactory =
+        new SchemaRegistryAvroSerdeFactory(builder.schemaRegistryUrl);
+    this.producer = new KafkaProducer<>(properties, new StringSerializer(),
+        avroSerdeFactory.<T>forKeys().serializer());
+  }
+
+  /**
+   * Builder class to build a new {@link KafkaRecordSender}.
+   *
+   * @param <T> the type of the records to be sent.
+   */
+  public static class Builder<T extends SpecificRecord> {
+
+    private final String bootstrapServers;
+    private final String topic;
+    private final String schemaRegistryUrl;
+    private Function<T, String> keyAccessor = x -> ""; // NOPMD
+    private Function<T, Long> timestampAccessor = x -> null; // NOPMD
+    private Properties defaultProperties = new Properties(); // NOPMD
+
+    /**
+     * Creates a Builder object for a {@link KafkaRecordSender}.
+     *
+     * @param bootstrapServers the Kafka bootstrap servers to connect to.
+     * @param topic the topic to write to.
+     * @param schemaRegistryUrl the URL of the Avro schema registry.
+     */
+    public Builder(final String bootstrapServers, final String topic,
+        final String schemaRegistryUrl) {
+      this.bootstrapServers = bootstrapServers;
+      this.topic = topic;
+      this.schemaRegistryUrl = schemaRegistryUrl;
+    }
+
+    public Builder<T> keyAccessor(final Function<T, String> keyAccessor) {
+      this.keyAccessor = keyAccessor;
+      return this;
+    }
+
+    public Builder<T> timestampAccessor(final Function<T, Long> timestampAccessor) {
+      this.timestampAccessor = timestampAccessor;
+      return this;
+    }
+
+    public Builder<T> defaultProperties(final Properties defaultProperties) {
+      this.defaultProperties = defaultProperties;
+      return this;
+    }
+
+    public KafkaRecordSender<T> build() {
+      return new KafkaRecordSender<>(this);
+    }
+  }
+
+  /**
+   * Write the passed monitoring record to Kafka.
+   */
+  public void write(final T monitoringRecord) {
+    final ProducerRecord<String, T> record =
+        new ProducerRecord<>(this.topic, null, this.timestampAccessor.apply(monitoringRecord),
+            this.keyAccessor.apply(monitoringRecord), monitoringRecord);
+
+    LOGGER.debug("Send record to Kafka topic {}: {}", this.topic, record);
+    this.producer.send(record);
+  }
+
+  public void terminate() {
+    this.producer.close();
+  }
+
+  @Override
+  public void transport(final T message) {
+    this.write(message);
+  }
+
+}
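The telescoping constructors of the old sender are replaced by a builder. A short usage sketch, assuming the generated Avro class titan.ccp.model.records.ActivePowerRecord and example endpoints:

```java
// Usage sketch for the new builder API (all endpoints are example values).
final KafkaRecordSender<ActivePowerRecord> sender =
    new KafkaRecordSender.Builder<ActivePowerRecord>(
        "localhost:9092",         // Kafka bootstrap servers
        "input",                  // target topic
        "http://localhost:8091")  // Avro schema registry
            .keyAccessor(ActivePowerRecord::getIdentifier)
            .timestampAccessor(ActivePowerRecord::getTimestamp)
            .build();
sender.write(new ActivePowerRecord("s_0", System.currentTimeMillis(), 10.0));
sender.terminate(); // closes the underlying KafkaProducer
```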
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java
new file mode 100644
index 0000000000000000000000000000000000000000..2249abcbcb1071cf880b2ee80f5d41f2b3dab463
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java
@@ -0,0 +1,202 @@
+package theodolite.commons.workloadgeneration.communication.zookeeper;
+
+import java.nio.charset.StandardCharsets;
+import java.util.function.BiConsumer;
+import org.apache.curator.framework.CuratorFramework;
+import org.apache.curator.framework.CuratorFrameworkFactory;
+import org.apache.curator.framework.api.CuratorWatcher;
+import org.apache.curator.framework.recipes.atomic.AtomicValue;
+import org.apache.curator.framework.recipes.atomic.DistributedAtomicInteger;
+import org.apache.curator.retry.ExponentialBackoffRetry;
+import org.apache.zookeeper.CreateMode;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher.Event.EventType;
+import org.apache.zookeeper.data.Stat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.functions.BeforeAction;
+import theodolite.commons.workloadgeneration.misc.WorkloadDefinition;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
+
+/**
+ * The central class responsible for distributing the workload across all workload generators.
+ */
+public class WorkloadDistributor {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(WorkloadDistributor.class);
+
+  private static final String NAMESPACE = "workload-generation";
+  private static final String COUNTER_PATH = "/counter";
+  private static final String WORKLOAD_PATH = "/workload";
+  private static final String WORKLOAD_DEFINITION_PATH = "/workload/definition";
+
+  // Curator retry strategy
+  private static final int BASE_SLEEP_TIME_MS = 2000;
+  private static final int MAX_RETRIES = 5;
+
+  // Wait time
+  private static final int MAX_WAIT_TIME = 20_000;
+
+  private final DistributedAtomicInteger counter;
+  private final KeySpace keySpace;
+  private final BeforeAction beforeAction;
+  private final BiConsumer<WorkloadDefinition, Integer> workerAction;
+
+  private final int instances;
+  private final ZooKeeper zooKeeper; // NOPMD keep instance variable instead of local variable
+  private final CuratorFramework client;
+
+  private boolean workloadGenerationStarted = false; // NOPMD explicitly initialize to false
+
+  /**
+   * Create a new workload distributor.
+   *
+   * @param instances the number of workload-generator instances.
+   * @param zooKeeper the ZooKeeper connection used for coordination.
+   * @param keySpace the keyspace for the workload generation.
+   * @param beforeAction the before action for the workload generation.
+   * @param workerAction the action to perform by the workers.
+   */
+  public WorkloadDistributor(
+      final int instances,
+      final ZooKeeper zooKeeper,
+      final KeySpace keySpace,
+      final BeforeAction beforeAction,
+      final BiConsumer<WorkloadDefinition, Integer> workerAction) {
+    this.instances = instances;
+    this.zooKeeper = zooKeeper;
+    this.keySpace = keySpace;
+    this.beforeAction = beforeAction;
+    this.workerAction = workerAction;
+
+    this.client = CuratorFrameworkFactory.builder()
+        .namespace(NAMESPACE)
+        .connectString(this.zooKeeper.getHost() + ":" + this.zooKeeper.getPort())
+        .retryPolicy(new ExponentialBackoffRetry(BASE_SLEEP_TIME_MS, MAX_RETRIES))
+        .build();
+
+    this.client.start();
+
+    try {
+      this.client.blockUntilConnected();
+    } catch (final InterruptedException e) {
+      LOGGER.error(e.getMessage(), e);
+      throw new IllegalStateException(e);
+    }
+
+    this.counter =
+        new DistributedAtomicInteger(this.client, COUNTER_PATH,
+            new ExponentialBackoffRetry(BASE_SLEEP_TIME_MS, MAX_RETRIES));
+  }
+
+  /**
+   * Start the workload distribution.
+   */
+  public void start() {
+    try {
+      AtomicValue<Integer> result = this.counter.increment();
+      while (!result.succeeded()) {
+        result = this.counter.increment();
+      }
+
+      final int workerId = result.preValue();
+
+      final CuratorWatcher watcher = this.buildWatcher(workerId);
+
+      final Stat nodeExists =
+          this.client.checkExists().creatingParentsIfNeeded().forPath(WORKLOAD_PATH);
+      if (nodeExists == null) {
+        this.client.create().forPath(WORKLOAD_PATH);
+      }
+
+      if (workerId == 0) {
+        LOGGER.info("This instance is master with id {}", workerId);
+
+        this.beforeAction.run();
+
+        // register worker action, as master acts also as worker
+        this.client.getChildren().usingWatcher(watcher).forPath(WORKLOAD_PATH);
+
+        LOGGER.info("Number of Workers: {}", this.instances);
+
+        final WorkloadDefinition definition =
+            new WorkloadDefinition(this.keySpace, this.instances);
+
+        this.client.create().withMode(CreateMode.EPHEMERAL).forPath(WORKLOAD_DEFINITION_PATH,
+            definition.toString().getBytes(StandardCharsets.UTF_8));
+
+      } else {
+        LOGGER.info("This instance is worker with id {}", workerId);
+
+        this.client.getChildren().usingWatcher(watcher).forPath(WORKLOAD_PATH);
+
+        final Stat definitionExists =
+            this.client.checkExists().creatingParentsIfNeeded().forPath(WORKLOAD_DEFINITION_PATH);
+
+        if (definitionExists != null) {
+          this.startWorkloadGeneration(workerId);
+        }
+      }
+
+      Thread.sleep(MAX_WAIT_TIME);
+
+      if (!this.workloadGenerationStarted) {
+        LOGGER.warn("No workload definition retrieved for 20 s. Terminating now..");
+      }
+    } catch (final Exception e) { // NOPMD need to catch exception because of external framework
+      LOGGER.error(e.getMessage(), e);
+      throw new IllegalStateException("Error when starting the distribution of the workload.", e);
+    }
+  }
+
+  /**
+   * Start the workload generation. The body of this method is executed at most once.
+   *
+   * @param workerId the ID of this worker
+   * @throws Exception when an error occurs
+   */
+  // NOPMD because exception thrown from used framework
+  private synchronized void startWorkloadGeneration(final int workerId) throws Exception { // NOPMD
+
+    if (!this.workloadGenerationStarted) {
+      this.workloadGenerationStarted = true;
+
+      final byte[] bytes =
+          this.client.getData().forPath(WORKLOAD_DEFINITION_PATH);
+      final WorkloadDefinition definition =
+          WorkloadDefinition.fromString(new String(bytes, StandardCharsets.UTF_8));
+
+      this.workerAction.accept(definition, workerId);
+    }
+  }
+
+  /**
+   * Build a curator watcher which performs the worker action.
+   *
+   * @param workerId the ID of the worker to create the watcher for.
+   * @return the curator watcher.
+   */
+  private CuratorWatcher buildWatcher(final int workerId) {
+    return new CuratorWatcher() {
+
+      @Override
+      public void process(final WatchedEvent event) {
+        if (event.getType() == EventType.NodeChildrenChanged) {
+          try {
+            WorkloadDistributor.this.startWorkloadGeneration(workerId);
+          } catch (final Exception e) { // NOPMD external framework throws exception
+            LOGGER.error(e.getMessage(), e);
+            throw new IllegalStateException("Error starting workload generation.", e);
+          }
+        }
+      }
+    };
+  }
+
+  /**
+   * Stop the workload distributor.
+   */
+  public void stop() {
+    this.client.close();
+  }
+
+}
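To make the coordination protocol concrete: the first instance to increment the distributed counter (ID 0) acts as master, runs the before action, and publishes an ephemeral workload definition node; all other instances watch that node and start generating once it appears. A sketch of driving the distributor directly — it is normally created by AbstractWorkloadGenerator — with example host, port, and key space:

```java
// Sketch: standalone use of the distributor (example values throughout).
final WorkloadDistributor distributor = new WorkloadDistributor(
    2,                                 // expected number of instances
    new ZooKeeper("localhost", 2181),  // coordination endpoint
    new KeySpace("s_", 10),            // keys s_0 .. s_9
    () -> System.out.println("master runs the before action once"),
    (definition, workerId) -> System.out.println(
        "worker " + workerId + " of " + definition.getNumberOfWorkers() + " starts"));
distributor.start(); // blocks for up to MAX_WAIT_TIME before returning
distributor.stop();
```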
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java
new file mode 100644
index 0000000000000000000000000000000000000000..2eaa1d487f67ae8325a3622a7ae6c4529fbb1cd6
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java
@@ -0,0 +1,56 @@
+package theodolite.commons.workloadgeneration.dimensions;
+
+import theodolite.commons.workloadgeneration.generators.AbstractWorkloadGenerator;
+
+/**
+ * Wrapper class for the definition of the keys that should be used by the
+ * {@link AbstractWorkloadGenerator}.
+ */
+public class KeySpace {
+
+  private final String prefix;
+  private final int min;
+  private final int max;
+
+
+  /**
+   * Create a new key space. All keys will have the prefix {@code prefix}. The remaining part of
+   * each key is a number from the interval [{@code min}, {@code max}] (both inclusive).
+   *
+   * @param prefix the prefix to use for all keys
+   * @param min the lower bound (inclusive) to start counting from
+   * @param max the upper bound (inclusive) to count to
+   */
+  public KeySpace(final String prefix, final int min, final int max) {
+    if (prefix == null || prefix.contains(";")) {
+      throw new IllegalArgumentException(
+          "The prefix must not be null and must not contain the ';' character.");
+    }
+    this.prefix = prefix;
+    this.min = min;
+    this.max = max;
+
+  }
+
+  public KeySpace(final String prefix, final int numberOfKeys) {
+    this(prefix, 0, numberOfKeys - 1);
+  }
+
+  public KeySpace(final int numberOfKeys) {
+    this("sensor_", 0, numberOfKeys - 1);
+  }
+
+  public String getPrefix() {
+    return this.prefix;
+  }
+
+
+  public int getMin() {
+    return this.min;
+  }
+
+
+  public int getMax() {
+    return this.max;
+  }
+}
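The key space is split round-robin across instances by the selection loop in AbstractWorkloadGenerator below: worker w of n takes the keys min+w, min+w+n, and so on up to max. A small illustration (not part of the commit):

```java
// Illustration of the round-robin key assignment for 3 workers and 10 keys.
final KeySpace keySpace = new KeySpace("s_", 10); // min=0, max=9
final int workers = 3;
for (int workerId = 0; workerId < workers; workerId++) {
  final StringBuilder keys = new StringBuilder();
  for (int i = keySpace.getMin() + workerId; i <= keySpace.getMax(); i += workers) {
    keys.append(keySpace.getPrefix()).append(i).append(' ');
  }
  System.out.println("worker " + workerId + ": " + keys); // worker 0: s_0 s_3 s_6 s_9
}
```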
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java
new file mode 100644
index 0000000000000000000000000000000000000000..7914a4985b6df40f7146c1fd681d1fba063f8b98
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java
@@ -0,0 +1,11 @@
+package theodolite.commons.workloadgeneration.functions;
+
+/**
+ * Describes the before action which is executed before every sub experiment.
+ */
+@FunctionalInterface
+public interface BeforeAction {
+
+  public void run();
+
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..672b579ebbdf3cbb08f3d05d9511c9077f9dac6b
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java
@@ -0,0 +1,14 @@
+package theodolite.commons.workloadgeneration.functions;
+
+/**
+ * This interface describes a function that takes meta information as a string (e.g. an ID) and
+ * produces an object of type T.
+ *
+ * @param <T> the type of the objects that will be generated by the function.
+ */
+@FunctionalInterface
+public interface MessageGenerator<T> {
+
+  T generateMessage(final String key);
+
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java
new file mode 100644
index 0000000000000000000000000000000000000000..7e5100a4e99f13a98156311a9d892c9626b2318a
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java
@@ -0,0 +1,14 @@
+package theodolite.commons.workloadgeneration.functions;
+
+/**
+ * This interface describes a function that consumes a message of type {@code T}. It is intended
+ * to transport individual messages to the messaging system.
+ *
+ * @param <T> the type of records to send as messages.
+ */
+@FunctionalInterface
+public interface Transport<T> {
+
+  void transport(final T message);
+
+}
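Both MessageGenerator and Transport are functional interfaces, so lambdas or method references suffice at the call site. A toy sketch that "transports" generated messages to stdout instead of Kafka:

```java
// Toy sketch: wire a generator to a transport without any messaging system.
final MessageGenerator<String> generator = key -> "record for " + key;
final Transport<String> transport = message -> System.out.println(message);
transport.transport(generator.generateMessage("s_0")); // prints: record for s_0
```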
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..7f372e014371e5407374493b6aced3bf949a1674
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java
@@ -0,0 +1,139 @@
+package theodolite.commons.workloadgeneration.generators;
+
+import java.time.Duration;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import theodolite.commons.workloadgeneration.communication.zookeeper.WorkloadDistributor;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.functions.BeforeAction;
+import theodolite.commons.workloadgeneration.functions.MessageGenerator;
+import theodolite.commons.workloadgeneration.functions.Transport;
+import theodolite.commons.workloadgeneration.misc.WorkloadDefinition;
+import theodolite.commons.workloadgeneration.misc.WorkloadEntity;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
+
+/**
+ * Base for workload generators.
+ *
+ * @param <T> the type of records the workload generator produces.
+ */
+public abstract class AbstractWorkloadGenerator<T>
+    implements WorkloadGenerator {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(AbstractWorkloadGenerator.class);
+
+  private final int instances; // NOPMD keep instance variable instead of local variable
+  private final ZooKeeper zooKeeper; // NOPMD keep instance variable instead of local variable
+  private final KeySpace keySpace;// NOPMD keep instance variable instead of local variable
+  private final BeforeAction beforeAction; // NOPMD keep instance variable instead of local variable
+  private final BiFunction<WorkloadDefinition, Integer, List<WorkloadEntity<T>>> workloadSelector;
+  private final MessageGenerator<T> generatorFunction;
+  private final Transport<T> transport;
+  private WorkloadDistributor workloadDistributor; // NOPMD keep instance variable instead of local
+  private final ScheduledExecutorService executor;
+
+  /**
+   * Create a new workload generator.
+   *
+   * @param instances the number of workload-generator instances.
+   * @param zooKeeper the zookeeper connection.
+   * @param keySpace the keyspace.
+   * @param threads the number of threads that is used to generate the load.
+   * @param period the period, how often a new record is emitted.
+   * @param duration the maximum runtime.
+   * @param beforeAction the action to perform before the workload generation starts.
+   * @param generatorFunction the function that is used to generate the individual records.
+   * @param transport the function that is used to send generated messages to the messaging system.
+   */
+  public AbstractWorkloadGenerator(
+      final int instances,
+      final ZooKeeper zooKeeper,
+      final KeySpace keySpace,
+      final int threads,
+      final Duration period,
+      final Duration duration,
+      final BeforeAction beforeAction,
+      final MessageGenerator<T> generatorFunction,
+      final Transport<T> transport) {
+    this.instances = instances;
+    this.zooKeeper = zooKeeper;
+    this.keySpace = keySpace;
+    this.beforeAction = beforeAction;
+    this.generatorFunction = generatorFunction;
+    this.workloadSelector = (workloadDefinition, workerId) -> {
+      final List<WorkloadEntity<T>> workloadEntities = new LinkedList<>();
+
+      for (int i =
+          workloadDefinition.getKeySpace().getMin() + workerId; i <= workloadDefinition
+              .getKeySpace().getMax(); i += workloadDefinition.getNumberOfWorkers()) {
+        final String id = workloadDefinition.getKeySpace().getPrefix() + i;
+        workloadEntities.add(new WorkloadEntity<>(id, this.generatorFunction));
+      }
+
+      return workloadEntities;
+    };
+    this.transport = transport;
+
+    this.executor = Executors.newScheduledThreadPool(threads);
+    final Random random = new Random();
+
+    final int periodMs = (int) period.toMillis();
+
+    LOGGER.info("Period: {}", periodMs);
+
+    final BiConsumer<WorkloadDefinition, Integer> workerAction = (declaration, workerId) -> {
+
+      final List<WorkloadEntity<T>> entities = this.workloadSelector.apply(declaration, workerId);
+
+      LOGGER.info("Beginning of Experiment...");
+      LOGGER.info("Generating records for {} keys.", entities.size());
+      LOGGER.info("Experiment is going to be executed for the specified duration...");
+
+      entities.forEach(entity -> {
+        final T message = entity.generateMessage();
+        final long initialDelay = random.nextInt(periodMs);
+        final Runnable task = () -> this.transport.transport(message);
+        this.executor.scheduleAtFixedRate(task, initialDelay, periodMs, TimeUnit.MILLISECONDS);
+      });
+
+
+      try {
+        this.executor.awaitTermination(duration.getSeconds(), TimeUnit.SECONDS);
+        LOGGER.info("Terminating now...");
+        this.stop();
+      } catch (final InterruptedException e) {
+        LOGGER.error("", e);
+        throw new IllegalStateException("Error when terminating the workload generation.", e);
+      }
+    };
+
+    this.workloadDistributor = new WorkloadDistributor(
+        this.instances,
+        this.zooKeeper,
+        this.keySpace,
+        this.beforeAction,
+        workerAction);
+  }
+
+  /**
+   * Start the workload generation. The generation terminates automatically after the specified
+   * {@code duration}.
+   */
+  @Override
+  public void start() {
+    this.workloadDistributor.start();
+  }
+
+  @Override
+  public void stop() {
+    this.workloadDistributor.stop();
+  }
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..944cec6a2dffed886f06fad1e36c9d35375fe15c
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java
@@ -0,0 +1,59 @@
+package theodolite.commons.workloadgeneration.generators;
+
+import java.time.Duration;
+import org.apache.avro.specific.SpecificRecord;
+import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.functions.BeforeAction;
+import theodolite.commons.workloadgeneration.functions.MessageGenerator;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
+
+/**
+ * Workload generator for generating load for the Kafka messaging system.
+ *
+ * @param <T> the type of records the workload generator produces.
+ */
+public class KafkaWorkloadGenerator<T extends SpecificRecord>
+    extends AbstractWorkloadGenerator<T> {
+
+  private final KafkaRecordSender<T> recordSender;
+
+  /**
+   * Create a new workload generator.
+   *
+   * @param instances the number of workload-generator instances.
+   * @param zooKeeper a reference to the ZooKeeper instance.
+   * @param keySpace the key space to generate the workload for.
+   * @param threads the number of threads to use per instance.
+   * @param period the period how often a message is generated for each key specified in the
+   *        {@code keySpace}
+   * @param duration the duration how long the workload generator will emit messages.
+   * @param beforeAction the action which will be performed before the workload generator starts
+   *        generating messages. If {@code null}, no before action will be performed.
+   * @param generatorFunction the generator function. This function is executed, each time a message
+   *        is generated.
+   * @param recordSender the record sender which is used to send the generated messages to kafka.
+   */
+  public KafkaWorkloadGenerator(
+      final int instances,
+      final ZooKeeper zooKeeper,
+      final KeySpace keySpace,
+      final int threads,
+      final Duration period,
+      final Duration duration,
+      final BeforeAction beforeAction,
+      final MessageGenerator<T> generatorFunction,
+      final KafkaRecordSender<T> recordSender) {
+    super(instances, zooKeeper, keySpace, threads, period, duration, beforeAction,
+        generatorFunction,
+        recordSender);
+    this.recordSender = recordSender;
+  }
+
+  @Override
+  public void stop() {
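+    // Terminate the record sender first so that no further records are produced, then stop
+    // the workload generation itself.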
+    this.recordSender.terminate();
+
+    super.stop();
+  }
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..785087c13480b7149a5726dfce8bbf4307b57933
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java
@@ -0,0 +1,207 @@
+package theodolite.commons.workloadgeneration.generators;
+
+import java.time.Duration;
+import java.util.Objects;
+import org.apache.avro.specific.SpecificRecord;
+import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender;
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+import theodolite.commons.workloadgeneration.functions.BeforeAction;
+import theodolite.commons.workloadgeneration.functions.MessageGenerator;
+import theodolite.commons.workloadgeneration.misc.ZooKeeper;
+
+/**
+ * Builder for {@link KafkaWorkloadGenerator}s.
+ *
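+ * <p>
+ * A minimal usage sketch ({@code MyRecord} stands for any {@code SpecificRecord} implementation
+ * and {@code recordSender} for a previously created {@link KafkaRecordSender}; both are
+ * placeholders here):
+ * </p>
+ *
+ * <pre>{@code
+ * final KafkaWorkloadGenerator<MyRecord> generator =
+ *     KafkaWorkloadGeneratorBuilder.<MyRecord>builder()
+ *         .instances(1)
+ *         .zooKeeper(new ZooKeeper("localhost", 2181))
+ *         .keySpace(new KeySpace("key_", 0, 9))
+ *         .threads(4)
+ *         .period(Duration.ofMillis(1000))
+ *         .duration(Duration.ofMinutes(10))
+ *         .generatorFunction(key -> new MyRecord(key))
+ *         .kafkaRecordSender(recordSender)
+ *         .build();
+ * generator.start();
+ * }</pre>
+ *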
+ * @param <T> the type of records the generator to be built is dedicated for.
+ */
+public final class KafkaWorkloadGeneratorBuilder<T extends SpecificRecord> { // NOPMD
+
+  private int instances; // NOPMD
+  private ZooKeeper zooKeeper; // NOPMD
+  private KeySpace keySpace; // NOPMD
+  private int threads; // NOPMD
+  private Duration period; // NOPMD
+  private Duration duration; // NOPMD
+  private BeforeAction beforeAction; // NOPMD
+  private MessageGenerator<T> generatorFunction; // NOPMD
+  private KafkaRecordSender<T> kafkaRecordSender; // NOPMD
+
+  private KafkaWorkloadGeneratorBuilder() {}
+
+  /**
+   * Get a builder for the {@link KafkaWorkloadGenerator}.
+   *
+   * @return the builder.
+   */
+  public static <T extends SpecificRecord> KafkaWorkloadGeneratorBuilder<T> builder() {
+    return new KafkaWorkloadGeneratorBuilder<>();
+  }
+
+  /**
+   * Set the number of instances.
+   *
+   * @param instances the number of instances.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> instances(final int instances) {
+    this.instances = instances;
+    return this;
+  }
+
+  /**
+   * Set the ZooKeeper reference.
+   *
+   * @param zooKeeper a reference to the ZooKeeper instance.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> zooKeeper(final ZooKeeper zooKeeper) {
+    this.zooKeeper = zooKeeper;
+    return this;
+  }
+
+  /**
+   * Set the before action for the {@link KafkaWorkloadGenerator}.
+   *
+   * @param beforeAction the {@link BeforeAction}.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> beforeAction(final BeforeAction beforeAction) {
+    this.beforeAction = beforeAction;
+    return this;
+  }
+
+  /**
+   * Set the key space for the {@link KafkaWorkloadGenerator}.
+   *
+   * @param keySpace the {@link KeySpace}.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> keySpace(final KeySpace keySpace) {
+    this.keySpace = keySpace;
+    return this;
+  }
+
+  /**
+   * Set the number of threads for the {@link KafkaWorkloadGenerator}.
+   *
+   * @param threads the number of threads.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> threads(final int threads) {
+    this.threads = threads;
+    return this;
+  }
+
+  /**
+   * Set the period for the {@link KafkaWorkloadGenerator}.
+   *
+   * @param period the period as a {@link Duration}.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> period(final Duration period) {
+    this.period = period;
+    return this;
+  }
+
+  /**
+   * Set the duration for the {@link KafkaWorkloadGenerator}.
+   *
+   * @param duration the {@link Duration}.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> duration(final Duration duration) {
+    this.duration = duration;
+    return this;
+  }
+
+  /**
+   * Set the generator function for the {@link KafkaWorkloadGenerator}.
+   *
+   * @param generatorFunction the generator function.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> generatorFunction(
+      final MessageGenerator<T> generatorFunction) {
+    this.generatorFunction = generatorFunction;
+    return this;
+  }
+
+  /**
+   * Set the {@link KafkaRecordSender} for the {@link KafkaWorkloadGenerator}.
+   *
+   * @param kafkaRecordSender the record sender to use.
+   * @return the builder.
+   */
+  public KafkaWorkloadGeneratorBuilder<T> kafkaRecordSender(
+      final KafkaRecordSender<T> kafkaRecordSender) {
+    this.kafkaRecordSender = kafkaRecordSender;
+    return this;
+  }
+
+  /**
+   * Build the actual {@link KafkaWorkloadGenerator}. The following parameters must be specified
+   * before this method is called:
+   * <ul>
+   * <li>number of instances</li>
+   * <li>zookeeper</li>
+   * <li>key space</li>
+   * <li>period</li>
+   * <li>duration</li>
+   * <li>generator function</li>
+   * <li>kafka record sender</li>
+   * </ul>
+   *
+   * @return the built instance of the {@link KafkaWorkloadGenerator}.
+   */
+  public KafkaWorkloadGenerator<T> build() {
+    if (this.instances < 1) { // NOPMD
+      throw new IllegalArgumentException(
+          "Please specify a valid number of instances. Currently: " + this.instances);
+    }
+    Objects.requireNonNull(this.zooKeeper, "Please specify the ZooKeeper instance.");
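+    // Fall back to a single thread if no valid thread count was specified.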
+    if (this.threads < 1) { // NOPMD
+      this.threads = 1;
+    }
+    Objects.requireNonNull(this.keySpace, "Please specify the key space.");
+    Objects.requireNonNull(this.period, "Please specify the period.");
+    Objects.requireNonNull(this.duration, "Please specify the duration.");
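+    // Use a no-op before action if none was specified.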
+    this.beforeAction = Objects.requireNonNullElse(this.beforeAction, () -> {
+    });
+    Objects.requireNonNull(this.generatorFunction, "Please specify the generator function.");
+    Objects.requireNonNull(this.kafkaRecordSender, "Please specify the Kafka record sender.");
+
+    return new KafkaWorkloadGenerator<>(
+        this.instances,
+        this.zooKeeper,
+        this.keySpace,
+        this.threads,
+        this.period,
+        this.duration,
+        this.beforeAction,
+        this.generatorFunction,
+        this.kafkaRecordSender);
+  }
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..b121ac157b84d64818d9fdfc90589d49fd933752
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java
@@ -0,0 +1,18 @@
+package theodolite.commons.workloadgeneration.generators;
+
+/**
+ * Base methods for workload generators.
+ */
+public interface WorkloadGenerator {
+
+  /**
+   * Start the workload generation.
+   */
+  void start();
+
+  /**
+   * Stop the workload generation.
+   */
+  void stop();
+
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java
new file mode 100644
index 0000000000000000000000000000000000000000..86369d6c883954b792b2ee0fd6a988377ecb8965
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java
@@ -0,0 +1,73 @@
+package theodolite.commons.workloadgeneration.misc;
+
+import theodolite.commons.workloadgeneration.dimensions.KeySpace;
+
+/**
+ * The central class that contains all information that needs to be exchanged between the nodes for
+ * distributed workload generation.
+ */
+public class WorkloadDefinition {
+  private static final int ZERO = 0;
+  private static final int ONE = 1;
+  private static final int TWO = 2;
+  private static final int THREE = 3;
+  private static final int FOUR = 4;
+
+  private final KeySpace keySpace;
+  private final int numberOfWorkers;
+
+  /**
+   * Create a new workload definition.
+   *
+   * @param keySpace the key space to use.
+   * @param numberOfWorkers the number of workers participating in the workload generation.
+   */
+  public WorkloadDefinition(final KeySpace keySpace, final int numberOfWorkers) {
+    this.keySpace = keySpace;
+    this.numberOfWorkers = numberOfWorkers;
+  }
+
+  public KeySpace getKeySpace() {
+    return this.keySpace;
+  }
+
+  public int getNumberOfWorkers() {
+    return this.numberOfWorkers;
+  }
+
+  /**
+   * Encode all information of the workload definition into one string.
+   *
+   * @return a string that encodes all information of the workload generation in a compact format.
+   *         The format is 'keySpace;keySpace.min;keySpace.max;numberOfWorkers'.
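+   *         For example, a key space with prefix {@code key_}, minimum {@code 0} and maximum
+   *         {@code 9}, distributed over {@code 2} workers, is encoded as {@code key_;0;9;2}.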
+   */
+  @Override
+  public String toString() {
+    return this.getKeySpace().getPrefix() + ";" + this.getKeySpace().getMin() + ";"
+        + this.getKeySpace().getMax() + ";" + this.getNumberOfWorkers();
+  }
+
+  /**
+   * Parse a workload definition from a string previously encoded in the format returned by
+   * {@link WorkloadDefinition#toString()}.
+   *
+   * @param workloadDefinitionString the workload definition string.
+   * @return the parsed workload definition.
+   */
+  public static WorkloadDefinition fromString(final String workloadDefinitionString) {
+    final String[] deserialized = workloadDefinitionString.split(";");
+
+    if (deserialized.length != FOUR) {
+      throw new IllegalArgumentException(
+          "Invalid workload definition string. Expected the format "
+              + "'keySpace;keySpace.min;keySpace.max;numberOfWorkers'.");
+    }
+
+    return new WorkloadDefinition(
+        new KeySpace(deserialized[ZERO], Integer.parseInt(deserialized[ONE]),
+            Integer.parseInt(deserialized[TWO])),
+        Integer.parseInt(deserialized[THREE]));
+  }
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java
new file mode 100644
index 0000000000000000000000000000000000000000..d8665b3fb53e7d15ed61780e3b91fbfe56f709ba
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java
@@ -0,0 +1,22 @@
+package theodolite.commons.workloadgeneration.misc;
+
+import theodolite.commons.workloadgeneration.functions.MessageGenerator;
+
+/**
+ * Representation of an entity of the workload generation that generates load for one fixed key.
+ *
+ * @param <T> The type of records the workload generator is dedicated for.
+ */
+public class WorkloadEntity<T> {
+  private final String key;
+  private final MessageGenerator<T> generator;
+
+  public WorkloadEntity(final String key, final MessageGenerator<T> generator) {
+    this.key = key;
+    this.generator = generator;
+  }
+
+  public T generateMessage() {
+    return this.generator.generateMessage(this.key);
+  }
+}
diff --git a/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java
new file mode 100644
index 0000000000000000000000000000000000000000..a80490600ad9c9c22c198fc76b6d9f73bdc30584
--- /dev/null
+++ b/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java
@@ -0,0 +1,29 @@
+package theodolite.commons.workloadgeneration.misc;
+
+/**
+ * Wrapper for connection information for ZooKeeper.
+ */
+public class ZooKeeper {
+
+  private final String host;
+  private final int port;
+
+  /**
+   * Create a new representation of a ZooKeeper instance.
+   *
+   * @param host the host of the ZooKeeper instance.
+   * @param port the port of the ZooKeeper instance.
+   */
+  public ZooKeeper(final String host, final int port) {
+    this.host = host;
+    this.port = port;
+  }
+
+  public String getHost() {
+    return this.host;
+  }
+
+  public int getPort() {
+    return this.port;
+  }
+}