diff --git a/.gitignore b/.gitignore
index bef98bd0b29a225ac758c501ea69e6eaf4ba1773..36f08fd69890d38fafe2b8bbf40d830773e737e0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,4 +28,5 @@ tmp/
 *.iml
 *.iws
 
+# Python Venv
 .venv
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index eed9a678a1b10a9943a4bc163fdee0c1fda00067..90effd8588932aca1b1ff6591ccceeda1854908e 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -23,37 +23,60 @@ stages:
 build:
   stage: build
   tags:
-    - dockerex
+    - exec-docker
   script: ./gradlew --build-cache assemble
 
 test:
   stage: test
   tags:
-    - dockerex
-  script: ./gradlew test
+    - exec-docker
+  script: ./gradlew test --continue
+  artifacts:
+    reports:
+      junit:
+        - "**/build/test-results/test/TEST-*.xml"
 
-.checkstyle:
+checkstyle:
   stage: check
   tags:
-    - dockerex
-  script: ./gradlew checkstyle
-  
-.pmd:
+    - exec-docker
+  script: ./gradlew checkstyle --continue
+  allow_failure: true
+  artifacts:
+    paths:
+      - "*/build/reports/checkstyle/main.html"
+    when: on_failure
+    expire_in: 1 day
+
+pmd:
   stage: check
   tags:
-    - dockerex
-  script: ./gradlew pmd
-  
-.spotbugs:
+    - exec-docker
+  script: ./gradlew pmd --continue
+  allow_failure: true
+  artifacts:
+    paths:
+      - "*/build/reports/pmd/*.html"
+    when: on_failure
+    expire_in: 1 day
+
+spotbugs:
   stage: check
   tags:
-    - dockerex
-  script: ./gradlew spotbugs
+    - exec-docker
+  script: ./gradlew spotbugs --continue
+  allow_failure: true
+  artifacts:
+    paths:
+      - "*/build/reports/spotbugs/*.html"
+    when: on_failure
+    expire_in: 1 day
 
 .deploy:
   stage: deploy
   tags:
-    - dockerex
+    - exec-docker
   # see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled
   # for image usage and settings for building with TLS and docker in docker
   image: docker:19.03.1
@@ -73,4 +96,4 @@ test:
     variables:
       - $DOCKERHUB_ORG
       - $DOCKERHUB_ID
-      - $DOCKERHUB_PW
\ No newline at end of file
+      - $DOCKERHUB_PW
diff --git a/build.gradle b/build.gradle
index 957316bff2ad6b7e5ca6432ef43d44aa3e6e7d01..6827860869614f2d0ff575cfb5e6229e6d4a3806 100644
--- a/build.gradle
+++ b/build.gradle
@@ -1,9 +1,4 @@
-apply plugin: 'java-library'
-apply plugin: 'pmd'
-apply plugin: 'checkstyle'
-apply plugin: 'com.github.spotbugs'
-apply plugin: 'eclipse'
-
+// Inherited by all subprojects
 buildscript {
   repositories {
     maven {
@@ -15,15 +10,35 @@ buildscript {
   }
 }
 
-sourceCompatibility = "1.11"
-targetCompatibility = "1.11"
+// Plugins for all projects
+allprojects {
+  apply plugin: 'eclipse'
+}
+
+// Plugins for subprojects
+subprojects {
+  apply plugin: 'application'
+  apply plugin: 'checkstyle'
+  apply plugin: 'pmd'
+  apply plugin: 'com.github.spotbugs'
+  apply plugin: 'java-library'
+}
+
+// Java version for all subprojects
+subprojects {
+  java {
+    sourceCompatibility = JavaVersion.VERSION_11
+    targetCompatibility = JavaVersion.VERSION_11
+  }
+}
 
+// Check for updates every build
 configurations.all {
-    // Check for updates every build
     resolutionStrategy.cacheChangingModulesFor 0, 'seconds'
 }
 
-allprojects { 
+// Repositories for all projects
+allprojects {
 	repositories {
 	    jcenter()
 	    maven {
@@ -32,42 +47,23 @@ allprojects {
 	}
 }
 
-dependencies {
-    // These dependencies is exported to consumers, that is to say found on their compile classpath.
-    api('org.industrial-devops:titan-ccp-common:0.0.3-SNAPSHOT') { changing = true }
-    api 'net.kieker-monitoring:kieker:1.14-SNAPSHOT'
-    api 'net.sourceforge.teetime:teetime:3.0'
-    
-    // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
-    implementation 'org.apache.kafka:kafka-clients:2.1.0'
-    implementation 'com.google.guava:guava:24.1-jre'
-    implementation 'org.jctools:jctools-core:2.1.1'
-
-    // Use JUnit test framework
-    testImplementation 'junit:junit:4.12'
-}
+// Dependencies
+subprojects {
+  dependencies {
+      // These dependencies are exported to consumers, that is to say found on their compile classpath.
+      api('org.industrial-devops:titan-ccp-common:0.0.3-SNAPSHOT') { changing = true }
+      api 'net.kieker-monitoring:kieker:1.14-SNAPSHOT'
+      api 'net.sourceforge.teetime:teetime:3.0'
 
-pmd {
-  ruleSets = [] // Gradle requires to clean the rule sets first
-  ruleSetFiles = files("config/pmd.xml")
-  ignoreFailures = false
-  toolVersion = "6.7.0"
-}
+      // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
+      implementation 'org.apache.kafka:kafka-clients:2.1.0'
+      implementation 'com.google.guava:guava:24.1-jre'
+      implementation 'org.jctools:jctools-core:2.1.1'
+      implementation 'org.slf4j:slf4j-simple:1.6.1'
 
-checkstyle {
-  configDir = file("config")
-  configFile = file("config/checkstyle.xml")
-  maxWarnings = 0
-  ignoreFailures = false
-  toolVersion = "8.12"
-}
-
-spotbugs {
-  excludeFilter = file("config/spotbugs-exclude-filter.xml")
-  reportLevel = "low"
-  effort = "max"
-  ignoreFailures = false
-  toolVersion = '3.1.7'
+      // Use JUnit test framework
+      testImplementation 'junit:junit:4.12'
+  }
 }
 
 // Per default XML reports for SpotBugs are generated
@@ -80,33 +76,64 @@ tasks.withType(com.github.spotbugs.SpotBugsTask) {
   }
 }
 
-task checkstyle {
-  group 'Quality Assurance'
-  description 'Run Checkstyle'
-  
-  dependsOn 'checkstyleMain'
-  dependsOn 'checkstyleTest'
-}
+// Subprojects quality tools tasks
+subprojects {
+  task pmd {
+    group 'Quality Assurance'
+    description 'Run PMD'
 
-task pmd {
-  group 'Quality Assurance'
-  description 'Run PMD'
+    dependsOn 'pmdMain'
+    dependsOn 'pmdTest'
+  }
+
+  task checkstyle {
+    group 'Quality Assurance'
+    description 'Run Checkstyle'
+
+    dependsOn 'checkstyleMain'
+    dependsOn 'checkstyleTest'
+  }
 
-  dependsOn 'pmdMain'
-  dependsOn 'pmdTest'
+  task spotbugs {
+    group 'Quality Assurance'
+    description 'Run SpotBugs'
+
+    dependsOn 'spotbugsMain'
+    dependsOn 'spotbugsTest'
+  }
 }
 
-task spotbugs {
-  group 'Quality Assurance'
-  description 'Run SpotBugs'
-  
-  dependsOn 'spotbugsMain'
-  dependsOn 'spotbugsTest'
+// Subprojects quality tools configuration
+subprojects {
+  pmd {
+    ruleSets = [] // Gradle requires the default rule sets to be cleared first
+    ruleSetFiles = files("$rootProject.projectDir/config/pmd.xml")
+    ignoreFailures = false
+    toolVersion = "6.7.0"
+  }
+
+  checkstyle {
+    configDirectory = file("$rootProject.projectDir/config")
+    configFile = file("$rootProject.projectDir/config/checkstyle.xml")
+    maxWarnings = 0
+    ignoreFailures = false
+    toolVersion = "8.12"
+  }
+
+  spotbugs {
+    excludeFilter = file("$rootProject.projectDir/config/spotbugs-exclude-filter.xml")
+    reportLevel = "low"
+    effort = "max"
+    ignoreFailures = false
+    toolVersion = '3.1.7'
+  }
 }
 
-eclipse {
-    classpath {
-       downloadSources=true
-       downloadJavadoc=true
-    }
-}
\ No newline at end of file
+allprojects {
+  eclipse {
+      classpath {
+         downloadSources=true
+         downloadJavadoc=true
+      }
+  }
+}
diff --git a/execution/README.md b/execution/README.md
index 3ada40f99c5eb06d00d2f572bd48543bd078964f..fb864fcdfb5c0791befd802fe7e2fad38383f29e 100644
--- a/execution/README.md
+++ b/execution/README.md
@@ -7,12 +7,58 @@ For executing benchmarks, access to Kubernetes cluster is required. We suggest
 to create a dedicated namespace for executing our benchmarks. The following
 services need to be available as well.
 
-### Prometheus (+ Grafana)
+### Prometheus
 
-We suggest to use the Prometheus Operator and create a dedicated prometheus and
-grafana instance for these benchmarks.
+We suggest using the [Prometheus Operator](https://github.com/coreos/prometheus-operator)
+and creating a dedicated Prometheus instance for these benchmarks.
 
-**TODO** Add required configuration, introduce service Monitors
+If the Prometheus Operator is not already available on your cluster, a convenient
+way to install it is via the [**unofficial** Prometheus Operator Helm chart](https://github.com/helm/charts/tree/master/stable/prometheus-operator).
+As you may not need an entire cluster monitoring stack, you can use our Helm
+configuration to install only the operator:
+
+```sh
+helm install prometheus-operator stable/prometheus-operator -f infrastructure/prometheus/helm-values.yaml
+```
+
+After installation, you need to create a Prometheus instance:
+
+```sh
+kubectl apply -f infrastructure/prometheus/prometheus.yaml
+```
+
+You might also need to apply the [ServiceAccount](infrastructure/prometheus/service-account.yaml), [ClusterRole](infrastructure/prometheus/cluster-role.yaml)
+and the [ClusterRoleBinding](infrastructure/prometheus/cluster-role-binding.yaml),
+depending on your cluster's security policies.
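+
+If so, they can be applied just like the other manifests, for example:
+
+```sh
+kubectl apply -f infrastructure/prometheus/service-account.yaml
+kubectl apply -f infrastructure/prometheus/cluster-role.yaml
+kubectl apply -f infrastructure/prometheus/cluster-role-binding.yaml
+```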
+
+For the individual benchmarking components to be monitored, [ServiceMonitors](https://github.com/coreos/prometheus-operator#customresourcedefinitions)
+are used. See the corresponding sections below for how to install them.
+
+### Grafana
+
+As with Prometheus, we suggest creating a dedicated Grafana instance. Grafana
+with our default configuration can be installed via Helm:
+
+```sh
+helm install grafana stable/grafana -f infrastructure/grafana/values.yaml
+```
+
+The official [Grafana Helm Chart repository](https://github.com/helm/charts/tree/master/stable/grafana)
+provides further documentation including a table of configuration options.
+
+We provide ConfigMaps for a [Grafana dashboard](infrastructure/grafana/dashboard-config-map.yaml) and a [Grafana data source](infrastructure/grafana/prometheus-datasource-config-map.yaml).
+
+Create the ConfigMap for the dashboard:
+
+```sh
+kubectl apply -f infrastructure/grafana/dashboard-config-map.yaml
+```
+
+Create the ConfigMap for the data source:
+
+```sh
+kubectl apply -f infrastructure/grafana/prometheus-datasource-config-map.yaml
+```
 
 ### A Kafka cluster
 
@@ -22,6 +68,7 @@ below), we provide a [patch](https://github.com/SoerenHenning/cp-helm-charts)
 for these helm charts. Note that this patch is only required for observation and
 not for the actual benchmark execution and evaluation.
 
-**TODO** Add required configuration, installation
-
-### The Kafka Lag Exporter
@@ -33,11 +80,93 @@ helm install kafka-lag-exporter https://github.com/lightbend/kafka-lag-exporter/
-``
-
-**TODO** Add configuration + ServiceMonitor
+#### Our patched Confluent Helm Charts
+
+To use our patched Confluent Helm Charts, clone the
+[chart's repository](https://github.com/SoerenHenning/cp-helm-charts). We also
+provide a [default configuration](infrastructure/kafka/values.yaml). If you do
+not want to deploy 10 Kafka and 3 Zookeeper instances, alter the configuration
+file accordingly. To install Confluent's Kafka and use the configuration:
+
+```sh
+helm install my-confluent <path-to-cp-helm-charts> -f infrastructure/kafka/values.yaml
+```
+
+To let Prometheus scrape Kafka metrics, deploy a ServiceMonitor:
+
+```sh
+kubectl apply -f infrastructure/kafka/service-monitor.yaml
+```
+
+#### Other options for Kafka
+
+Other Kafka deployments, for example, using Strimzi, should work in a similar way.
+
+### The Kafka Lag Exporter
+
+[Lightbend's Kafka Lag Exporter](https://github.com/lightbend/kafka-lag-exporter)
+can be installed via Helm. We also provide a [default configuration](infrastructure/kafka-lag-exporter/values.yaml).
+To install it:
+
+```sh
+helm install kafka-lag-exporter https://github.com/lightbend/kafka-lag-exporter/releases/download/v0.6.0/kafka-lag-exporter-0.6.0.tgz -f infrastructure/kafka-lag-exporter/values.yaml
+```
+
+To let Prometheus scrape Kafka lag metrics, deploy a ServiceMonitor:
+
+```sh
+kubectl apply -f infrastructure/kafka-lag-exporter/service-monitor.yaml
+```
 
 
 ## Python 3.7
 
-For executing benchmarks and analyzing their results, a Python 3.7 installation
-is required. We suggest to use a virtual environment placed in the `.venv` directory.
-
-**TODO** Show how to install requirements
+For executing benchmarks and analyzing their results, a **Python 3.7** installation
+is required. We suggest using a virtual environment placed in the `.venv` directory.
+
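+Such an environment can, for example, be created and activated as follows
+(assuming `python3.7` is available on your `PATH`):
+
+```sh
+python3.7 -m venv .venv
+source .venv/bin/activate
+```
+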
+A set of requirements is needed for the analysis Jupyter notebooks and the
+execution tool. You can install them with the following command (make sure to
+be in your virtual environment if you use one):
+
+```sh
+pip install -r requirements.txt 
+```
+
+
+## Required Manual Adjustments
+
+Depending on your setup, some additional adjustments may be necessary:
+
+* Change Kafka and Zookeeper servers in the Kubernetes deployments (uc1-application etc.) and `run_XX.sh` scripts
+* Change Prometheus' URL in `lag_analysis.py`
+* Change the path to your Python 3.7 virtual environment in the `run_XX.sh` scripts (to find the venv's `bin/activate`; see the sketch below)
+* Change the name of your Kubernetes namespace for [Prometheus' ClusterRoleBinding](infrastructure/prometheus/cluster-role-binding.yaml)
+* *Please let us know if further adjustments are necessary*
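+
+For the virtual environment adjustment, the line sourcing the environment in a
+`run_XX.sh` script would look roughly like this (a sketch, assuming the
+suggested `.venv` location; adjust the path to your setup):
+
+```sh
+source .venv/bin/activate
+```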
+
+
+
+# Execution
+
+The `./run_loop.sh` script is the entry point for all benchmark executions. It has to be called as follows:
+
+```sh
+./run_loop.sh <use-case> <wl-values> <instances> <partitions> <cpu-limit> <memory-limit> <commit-interval> <duration>
+```
+
+* `<use-case>`: Stream processing use case to be benchmarked. Has to be one of `1`, `2`, `3` or `4`.
+* `<wl-values>`: Values for the workload generator to be tested, separated by commas. For example `100000, 200000, 300000`.
+* `<instances>`: Numbers of instances to be benchmarked, separated by commas. For example `1, 2, 3, 4`.
+* `<partitions>`: Number of partitions for Kafka topics. Optional. Default `40`.
+* `<cpu-limit>`: Kubernetes CPU limit. Optional. Default `1000m`.
+* `<memory-limit>`: Kubernetes memory limit. Optional. Default `4Gi`.
+* `<commit-interval>`: Kafka Streams' commit interval in milliseconds. Optional. Default `100`.
+* `<duration>`: Duration in minutes for which subexperiments should be executed. Optional. Default `5`.
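+
+For example, the following invocation (taken from one of our execution scripts)
+benchmarks use case 1 with three workloads and one to five instances, using 40
+partitions, a 1000m CPU limit, a 4Gi memory limit, a 100 ms commit interval,
+and 5 minutes per subexperiment:
+
+```sh
+./run_loop.sh 1 "200000 250000 300000" "1 2 3 4 5" 40 1000m 4Gi 100 5
+```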
diff --git a/execution/execution.sh b/execution/execution.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0a1ead95049564b9d88f35d40ea622788119e4dc
--- /dev/null
+++ b/execution/execution.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+./run_loop.sh 1 "25000 50000 75000 100000 125000 150000" "1 2 3 4 5" 40 #6*5=3Std
+sleep 5m
+./run_loop.sh 2 "6 7 8 9" "1 2 3 4 6 8 10 12 14 16 18 20" 40 #4*12=5Std
+sleep 5m
+./run_loop.sh 3 "25000 50000 75000 100000 125000 150000" "1 2 3 4 5 6" 40 #6*6=3.5Std
+sleep 5m
+./run_loop.sh 4 "25000 50000 75000 100000 125000 150000" "1 2 4 6 8 10 12 14 16 18 20 30 40 50 60 70 80 90" 40 #6*18=11Std
+sleep 5m
+
+./run_loop.sh 1 "25000 50000 75000 100000 125000 150000" "1 2 3 4 5" 400 #6*5=3Std
+sleep 5m
+./run_loop.sh 2 "6 7 8 9" "1 2 3 4 6 8 10 12 14 16 18 20" 400 #4*12=5Std
+sleep 5m
+./run_loop.sh 3 "25000 50000 75000 100000 125000 150000" "1 2 3 4 5 6" 400 #6*6=3.5Std
+sleep 5m
+./run_loop.sh 4 "25000 50000 75000 100000 125000 150000" "1 2 4 6 8 10 12 14 16 18 20 30 40 50 60 70 80 90" 400 #6*18=11Std
+sleep 5m
+./run_loop.sh 4 "150000" "100 110 120 130 140 150 160 17 18 190 200" 400 #6*18=11Std
+sleep 5m
+# For commit interval evaluation
+./run_loop.sh 4 "5000 10000 15000 20000 25000 30000" "1 2 3 4 5 6 7 8 9 10 11 12 13 14 15" 160
\ No newline at end of file
diff --git a/execution/execution_tmp_200507.sh b/execution/execution_tmp_200507.sh
new file mode 100644
index 0000000000000000000000000000000000000000..932940ae78dc5e5f0d2362da1047329a22713f51
--- /dev/null
+++ b/execution/execution_tmp_200507.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+#./run_loop.sh 1 "50000 100000 150000 200000 250000 300000" "1 2 3 4 5" 40 #3Std
+./run_loop.sh 1 "200000 250000 300000" "1 2 3 4 5" 40 1000m 4Gi 100 5 #1.5Std
+sleep 1m
+#./run_loop.sh 1 "50000 100000 150000 200000 250000 300000" "1 2 3 4 5" 400 #3Std
+./run_loop.sh 1 "200000 250000 300000" "1 2 3 4 5" 400 1000m 4Gi 100 5 #1.5Std
+sleep 1m
+
+#./run_loop.sh 3 "50000 100000 150000 200000 250000 300000" "1 2 3 4 5 6 7 8 9 10" 40 #6 Std
+./run_loop.sh 3 "200000 250000 300000" "1 2 3 4 5 6 7 8 9 10" 40 1000m 4Gi 100 5 #3 Std
+sleep 1m
+#./run_loop.sh 3 "50000 100000 150000 200000 250000 300000" "1 2 3 4 5 6 7 8 9 10" 400 #6 Std
+./run_loop.sh 3 "200000 250000 300000" "1 2 3 4 5 6 7 8 9 10" 400 1000m 4Gi 100 5 #3 Std
+sleep 1m
+
+./run_loop.sh 1 "50000 100000 150000 200000 250000 300000" "1 2 3 4 5" 40 500m 2Gi 100 5 #3Std
diff --git a/execution/exp_counter.txt b/execution/exp_counter.txt
new file mode 100644
index 0000000000000000000000000000000000000000..573541ac9702dd3969c9bc859d2b91ec1f7e6e56
--- /dev/null
+++ b/execution/exp_counter.txt
@@ -0,0 +1 @@
+0
diff --git a/execution/experiments.txt b/execution/experiments.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5ef8210943cb4404777032c7978f2ee3fb6bca56
--- /dev/null
+++ b/execution/experiments.txt
@@ -0,0 +1,2 @@
+# Test Partition count of 100
+./run_loop.sh 1 "10000 50000 100000 200000" "1, 2, 4, 8, 12, 16, 20" 100
\ No newline at end of file
diff --git a/execution/infrastructure/grafana/dashboard-config-map.yaml b/execution/infrastructure/grafana/dashboard-config-map.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e858ffe7dfdd4fbcdf1592b0f564c305969f6af5
--- /dev/null
+++ b/execution/infrastructure/grafana/dashboard-config-map.yaml
@@ -0,0 +1,1005 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: scalability
+  labels:
+    grafana_dashboard: "1"
+data:
+  k8s-dashboard.json: |-
+    {
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": "-- Grafana --",
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "gnetId": null,
+    "graphTooltip": 0,
+    "id": 2,
+    "iteration": 1589140028684,
+    "links": [],
+    "panels": [
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 7,
+          "w": 12,
+          "x": 0,
+          "y": 0
+        },
+        "hiddenSeries": false,
+        "id": 2,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum(cp_kafka_server_brokertopicmetrics_messagesinpersec_topic_input)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{Messages In Per Second}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Messages In Per Second",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 7,
+          "w": 12,
+          "x": 12,
+          "y": 0
+        },
+        "hiddenSeries": false,
+        "id": 3,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum(cp_kafka_server_brokertopicmetrics_messagesinpersec_topic_output)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{Messages Out Per Second}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Messages Out Per Second",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 0,
+          "y": 7
+        },
+        "hiddenSeries": false,
+        "id": 9,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": true,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(group, topic) (kafka_consumergroup_group_lag > 0)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Record Lag",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 12,
+          "y": 7
+        },
+        "hiddenSeries": false,
+        "id": 5,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "count(count (kafka_consumer_consumer_fetch_manager_metrics_records_lag) by(pod))",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "instances",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Number of Instances",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "decimals": 0,
+            "format": "short",
+            "label": "",
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 0,
+          "y": 15
+        },
+        "hiddenSeries": false,
+        "id": 10,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(group,topic) (kafka_consumergroup_group_offset > 0)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Records Consumed",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 12,
+          "y": 15
+        },
+        "hiddenSeries": false,
+        "id": 12,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "count by(group,topic) (kafka_consumergroup_group_offset > 0)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Number of Partitions",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 0,
+          "y": 23
+        },
+        "hiddenSeries": false,
+        "id": 11,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(group,topic) (kafka_partition_latest_offset)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Records Produced (Kafka Lag Exporter)",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 12,
+          "y": 23
+        },
+        "hiddenSeries": false,
+        "id": 8,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "count by(job, topic) (kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Number of Partitions (Kafka Streams Export)",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "decimals": null,
+            "format": "short",
+            "label": "",
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 0,
+          "y": 31
+        },
+        "hiddenSeries": false,
+        "id": 4,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": false,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(job, topic) (kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "{{topic}}",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Record Lag (Kafka Streams Export)",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      },
+      {
+        "aliasColors": {},
+        "bars": false,
+        "dashLength": 10,
+        "dashes": false,
+        "datasource": null,
+        "fill": 1,
+        "fillGradient": 0,
+        "gridPos": {
+          "h": 8,
+          "w": 12,
+          "x": 12,
+          "y": 31
+        },
+        "hiddenSeries": false,
+        "id": 13,
+        "legend": {
+          "alignAsTable": false,
+          "avg": false,
+          "current": false,
+          "max": false,
+          "min": false,
+          "show": true,
+          "total": false,
+          "values": false
+        },
+        "lines": true,
+        "linewidth": 1,
+        "links": [],
+        "nullPointMode": "null",
+        "options": {
+          "dataLinks": []
+        },
+        "percentage": false,
+        "pointradius": 5,
+        "points": false,
+        "renderer": "flot",
+        "seriesOverrides": [],
+        "spaceLength": 10,
+        "stack": true,
+        "steppedLine": false,
+        "targets": [
+          {
+            "expr": "sum by(group) (kafka_consumergroup_group_lag > 0)",
+            "format": "time_series",
+            "intervalFactor": 1,
+            "legendFormat": "total lag",
+            "refId": "D"
+          }
+        ],
+        "thresholds": [],
+        "timeFrom": null,
+        "timeRegions": [],
+        "timeShift": null,
+        "title": "Total Record Lag (Kafka Lag Exporter)",
+        "tooltip": {
+          "shared": true,
+          "sort": 0,
+          "value_type": "individual"
+        },
+        "type": "graph",
+        "xaxis": {
+          "buckets": null,
+          "mode": "time",
+          "name": null,
+          "show": true,
+          "values": []
+        },
+        "yaxes": [
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": "0",
+            "show": true
+          },
+          {
+            "format": "short",
+            "label": null,
+            "logBase": 1,
+            "max": null,
+            "min": null,
+            "show": true
+          }
+        ],
+        "yaxis": {
+          "align": false,
+          "alignLevel": null
+        }
+      }
+    ],
+    "refresh": "10s",
+    "schemaVersion": 21,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": [
+        {
+          "allValue": null,
+          "current": {
+            "tags": [],
+            "text": "titan-ccp-aggregation",
+            "value": "titan-ccp-aggregation"
+          },
+          "datasource": "Prometheus",
+          "definition": "label_values(kafka_consumer_consumer_fetch_manager_metrics_records_lag, job)",
+          "hide": 0,
+          "includeAll": false,
+          "label": "Job",
+          "multi": false,
+          "name": "Job",
+          "options": [
+            {
+              "selected": true,
+              "text": "titan-ccp-aggregation",
+              "value": "titan-ccp-aggregation"
+            }
+          ],
+          "query": "label_values(kafka_consumer_consumer_fetch_manager_metrics_records_lag, job)",
+          "refresh": 0,
+          "regex": "",
+          "skipUrlSync": false,
+          "sort": 0,
+          "tagValuesQuery": "",
+          "tags": [],
+          "tagsQuery": "",
+          "type": "query",
+          "useTags": false
+        }
+      ]
+    },
+    "time": {
+      "from": "now-1h",
+      "to": "now"
+    },
+    "timepicker": {
+      "refresh_intervals": [
+        "5s",
+        "10s",
+        "30s",
+        "1m",
+        "5m",
+        "15m",
+        "30m",
+        "1h",
+        "2h",
+        "1d"
+      ]
+    },
+    "timezone": "",
+    "title": "Scalability Benchmarking",
+    "uid": "dad0CNlZz",
+    "version": 25
+    }
\ No newline at end of file
diff --git a/execution/infrastructure/grafana/prometheus-datasource-config-map.yaml b/execution/infrastructure/grafana/prometheus-datasource-config-map.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ec9aaba2da3f0d8de7c48be8418de08548184b26
--- /dev/null
+++ b/execution/infrastructure/grafana/prometheus-datasource-config-map.yaml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: prometheus
+  labels:
+    grafana_datasource: "1"
+data:
+  datasource.yaml: |-
+    # config file version
+    apiVersion: 1
+    datasources:
+      # <string, required> name of the datasource. Required
+    - name: Prometheus
+      # <string, required> datasource type. Required
+      type: prometheus
+      # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
+      access: proxy
+      # <bool> mark as default datasource. Max one per org
+      isDefault: true
+      # <int> org id. will default to orgId 1 if not specified
+      orgId: 1
+      # <string> url
+      url: http://prometheus-operated:9090 #http://localhost:9090
+      # <map> fields that will be converted to json and stored in json_data
+      jsonData:
+        timeInterval: "15s"
+      version: 1
+      # <bool> allow users to edit datasources from the UI.
+      editable: true
diff --git a/execution/infrastructure/grafana/scalability-benchmarking-dashbaord.json b/execution/infrastructure/grafana/scalability-benchmarking-dashbaord.json
new file mode 100644
index 0000000000000000000000000000000000000000..4c7e312d9a5f825c082671aca69b8b3800ea6255
--- /dev/null
+++ b/execution/infrastructure/grafana/scalability-benchmarking-dashbaord.json
@@ -0,0 +1,997 @@
+{
+  "annotations": {
+    "list": [
+      {
+        "builtIn": 1,
+        "datasource": "-- Grafana --",
+        "enable": true,
+        "hide": true,
+        "iconColor": "rgba(0, 211, 255, 1)",
+        "name": "Annotations & Alerts",
+        "type": "dashboard"
+      }
+    ]
+  },
+  "editable": true,
+  "gnetId": null,
+  "graphTooltip": 0,
+  "id": 2,
+  "iteration": 1589140028684,
+  "links": [],
+  "panels": [
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": null,
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 7,
+        "w": 12,
+        "x": 0,
+        "y": 0
+      },
+      "hiddenSeries": false,
+      "id": 2,
+      "legend": {
+        "alignAsTable": false,
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "options": {
+        "dataLinks": []
+      },
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "sum(cp_kafka_server_brokertopicmetrics_messagesinpersec_topic_input)",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "{{Messages In Per Second}}",
+          "refId": "D"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Messages In Per Second",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": null,
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 7,
+        "w": 12,
+        "x": 12,
+        "y": 0
+      },
+      "hiddenSeries": false,
+      "id": 3,
+      "legend": {
+        "alignAsTable": false,
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "options": {
+        "dataLinks": []
+      },
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "sum(cp_kafka_server_brokertopicmetrics_messagesinpersec_topic_output)",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "{{Messages Out Per Second}}",
+          "refId": "D"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Messages Out Per Second",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": null,
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 0,
+        "y": 7
+      },
+      "hiddenSeries": false,
+      "id": 9,
+      "legend": {
+        "alignAsTable": false,
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "options": {
+        "dataLinks": []
+      },
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": true,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "sum by(group, topic) (kafka_consumergroup_group_lag > 0)",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "{{topic}}",
+          "refId": "D"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Record Lag",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": null,
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 12,
+        "y": 7
+      },
+      "hiddenSeries": false,
+      "id": 5,
+      "legend": {
+        "alignAsTable": false,
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "options": {
+        "dataLinks": []
+      },
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "count(count (kafka_consumer_consumer_fetch_manager_metrics_records_lag) by(pod))",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "instances",
+          "refId": "D"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Number of Instances",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "decimals": 0,
+          "format": "short",
+          "label": "",
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": null,
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 0,
+        "y": 15
+      },
+      "hiddenSeries": false,
+      "id": 10,
+      "legend": {
+        "alignAsTable": false,
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "options": {
+        "dataLinks": []
+      },
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "sum by(group,topic) (kafka_consumergroup_group_offset > 0)",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "{{topic}}",
+          "refId": "D"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Records Consumed",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": null,
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 12,
+        "y": 15
+      },
+      "hiddenSeries": false,
+      "id": 12,
+      "legend": {
+        "alignAsTable": false,
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "options": {
+        "dataLinks": []
+      },
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "count by(group,topic) (kafka_consumergroup_group_offset > 0)",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "{{topic}}",
+          "refId": "D"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Number of Partitions",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": null,
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 0,
+        "y": 23
+      },
+      "hiddenSeries": false,
+      "id": 11,
+      "legend": {
+        "alignAsTable": false,
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "options": {
+        "dataLinks": []
+      },
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "sum by(group,topic) (kafka_partition_latest_offset)",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "{{topic}}",
+          "refId": "D"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Records Produced (Kafka Lag Exporter)",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": null,
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 12,
+        "y": 23
+      },
+      "hiddenSeries": false,
+      "id": 8,
+      "legend": {
+        "alignAsTable": false,
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "options": {
+        "dataLinks": []
+      },
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "count by(job, topic) (kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "{{topic}}",
+          "refId": "D"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Number of Partitions (Kafka Streams Export)",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "decimals": null,
+          "format": "short",
+          "label": "",
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": null,
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 0,
+        "y": 31
+      },
+      "hiddenSeries": false,
+      "id": 4,
+      "legend": {
+        "alignAsTable": false,
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "options": {
+        "dataLinks": []
+      },
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "sum by(job, topic) (kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "{{topic}}",
+          "refId": "D"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Record Lag (Kafka Streams Export)",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    },
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": null,
+      "fill": 1,
+      "fillGradient": 0,
+      "gridPos": {
+        "h": 8,
+        "w": 12,
+        "x": 12,
+        "y": 31
+      },
+      "hiddenSeries": false,
+      "id": 13,
+      "legend": {
+        "alignAsTable": false,
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "options": {
+        "dataLinks": []
+      },
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": true,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "sum by(group) (kafka_consumergroup_group_lag > 0)",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "total lag",
+          "refId": "D"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeRegions": [],
+      "timeShift": null,
+      "title": "Total Record Lag (Kafka Lag Exporter)",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": "0",
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ],
+      "yaxis": {
+        "align": false,
+        "alignLevel": null
+      }
+    }
+  ],
+  "refresh": "10s",
+  "schemaVersion": 21,
+  "style": "dark",
+  "tags": [],
+  "templating": {
+    "list": [
+      {
+        "allValue": null,
+        "current": {
+          "tags": [],
+          "text": "titan-ccp-aggregation",
+          "value": "titan-ccp-aggregation"
+        },
+        "datasource": "Prometheus",
+        "definition": "label_values(kafka_consumer_consumer_fetch_manager_metrics_records_lag, job)",
+        "hide": 0,
+        "includeAll": false,
+        "label": "Job",
+        "multi": false,
+        "name": "Job",
+        "options": [
+          {
+            "selected": true,
+            "text": "titan-ccp-aggregation",
+            "value": "titan-ccp-aggregation"
+          }
+        ],
+        "query": "label_values(kafka_consumer_consumer_fetch_manager_metrics_records_lag, job)",
+        "refresh": 0,
+        "regex": "",
+        "skipUrlSync": false,
+        "sort": 0,
+        "tagValuesQuery": "",
+        "tags": [],
+        "tagsQuery": "",
+        "type": "query",
+        "useTags": false
+      }
+    ]
+  },
+  "time": {
+    "from": "now-1h",
+    "to": "now"
+  },
+  "timepicker": {
+    "refresh_intervals": [
+      "5s",
+      "10s",
+      "30s",
+      "1m",
+      "5m",
+      "15m",
+      "30m",
+      "1h",
+      "2h",
+      "1d"
+    ]
+  },
+  "timezone": "",
+  "title": "Scalability Benchmarking",
+  "uid": "dad0CNlZz",
+  "version": 25
+}
\ No newline at end of file
diff --git a/execution/infrastructure/grafana/values.yaml b/execution/infrastructure/grafana/values.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..16f075745660e9fd522f37108d0479a8b6f997b4
--- /dev/null
+++ b/execution/infrastructure/grafana/values.yaml
@@ -0,0 +1,55 @@
+image:
+  repository: grafana/grafana
+  tag: 6.7.3
+  pullPolicy: IfNotPresent
+
+# Administrator credentials when not using an existing secret (see below)
+adminUser: admin
+adminPassword: admin
+
+
+## Sidecars that collect ConfigMaps with the specified label and store the included files in the respective folders
+## Requires at least Grafana 5 to work and can't be used together with the parameters dashboardProviders, datasources and dashboards
+sidecar:
+  image: kiwigrid/k8s-sidecar:0.1.99
+  imagePullPolicy: IfNotPresent
+  dashboards:
+    enabled: true
+    SCProvider: true
+    # label that the configmaps with dashboards are marked with
+    label: grafana_dashboard
+    # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
+    folder: /tmp/dashboards
+    # The default folder name; if set, a subfolder is created under `folder` and dashboards are put there instead
+    defaultFolderName: null
+    # If specified, the sidecar will search for dashboard config-maps inside this namespace.
+    # Otherwise the namespace in which the sidecar is running will be used.
+    # It's also possible to specify ALL to search in all namespaces
+    searchNamespace: null
+    # provider configuration that lets grafana manage the dashboards
+    provider:
+      # name of the provider, should be unique
+      name: sidecarProvider
+      # orgid as configured in grafana
+      orgid: 1
+      # folder in which the dashboards should be imported in grafana
+      folder: ''
+      # type of the provider
+      type: file
+      # disableDelete to activate an import-only behaviour
+      disableDelete: false
+      # allow updating provisioned dashboards from the UI
+      allowUiUpdates: true
+  datasources:
+    enabled: true
+    # label that the configmaps with datasources are marked with
+    label: grafana_datasource
+    # If specified, the sidecar will search for datasource config-maps inside this namespace.
+    # Otherwise the namespace in which the sidecar is running will be used.
+    # It's also possible to specify ALL to search in all namespaces
+    searchNamespace: default
+
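+# Example (hypothetical name): a ConfigMap the dashboard sidecar would pick up,
+# because it carries the grafana_dashboard label:
+#
+#   apiVersion: v1
+#   kind: ConfigMap
+#   metadata:
+#     name: my-dashboard
+#     labels:
+#       grafana_dashboard: "1"
+#   data:
+#     my-dashboard.json: |-
+#       {"title": "My Dashboard", "panels": []}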
+
+service:
+  nodePort: 31199
+  type: NodePort
\ No newline at end of file
diff --git a/execution/infrastructure/kafka-lag-exporter/install.sh b/execution/infrastructure/kafka-lag-exporter/install.sh
new file mode 100644
index 0000000000000000000000000000000000000000..525e91be95a5f31b1418e3a697d855ddd2bab3dd
--- /dev/null
+++ b/execution/infrastructure/kafka-lag-exporter/install.sh
@@ -0,0 +1,6 @@
+helm install kafka-lag-exporter https://github.com/lightbend/kafka-lag-exporter/releases/download/v0.6.0/kafka-lag-exporter-0.6.0.tgz \
+  --set clusters\[0\].name=my-confluent-cp-kafka \
+  --set clusters\[0\].bootstrapBrokers=my-confluent-cp-kafka:9092 \
+  --set pollIntervalSeconds=15 #5
+
+# Helm could also create ServiceMonitor
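+
+# To inspect the exported metrics manually (service name may differ per Helm release):
+#   kubectl port-forward svc/kafka-lag-exporter 8000:8000
+#   curl -s localhost:8000/metrics | grep kafka_consumergroup_group_lag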
diff --git a/execution/infrastructure/kafka-lag-exporter/service-monitor.yaml b/execution/infrastructure/kafka-lag-exporter/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..141dd96f9bb3973bb0f22a4aa04c29768e0a1376
--- /dev/null
+++ b/execution/infrastructure/kafka-lag-exporter/service-monitor.yaml
@@ -0,0 +1,15 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: kafka-lag-exporter
+    appScope: titan-ccp
+  name: kafka-lag-exporter
+spec:
+  selector:
+    matchLabels:
+      #app: cp-kafka
+      jobLabel: kafka-lag-exporter
+  endpoints:
+    - port: http
+      interval: 5s
\ No newline at end of file
diff --git a/execution/infrastructure/kafka-lag-exporter/values.yaml b/execution/infrastructure/kafka-lag-exporter/values.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8e31cc9fdf31f0a5d3b4542c3a227e4de212f6b2
--- /dev/null
+++ b/execution/infrastructure/kafka-lag-exporter/values.yaml
@@ -0,0 +1,14 @@
+clusters:
+  - name: "my-confluent-cp-kafka"
+    bootstrapBrokers: "my-confluent-cp-kafka:9092"
+
+## The interval between refreshing metrics
+pollIntervalSeconds: 15
+
+prometheus:
+  serviceMonitor:
+    enabled: false
+    interval: "30s"
+    # service monitor label selectors: https://github.com/helm/charts/blob/f5a751f174263971fafd21eee4e35416d6612a3d/stable/prometheus-operator/templates/prometheus/prometheus.yaml#L74
+    # additionalLabels:
+    #   prometheus: k8s
diff --git a/execution/infrastructure/kafka/service-monitor.yaml b/execution/infrastructure/kafka/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a37ac8d7ae51ef283b2b6e50d4dd6eae31a19d58
--- /dev/null
+++ b/execution/infrastructure/kafka/service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: cp-kafka
+    appScope: titan-ccp
+  name: kafka
+spec:
+  selector:
+    matchLabels:
+      app: cp-kafka
+  endpoints:
+    - port: metrics
+      interval: 7s
\ No newline at end of file
diff --git a/execution/infrastructure/kafka/values.yaml b/execution/infrastructure/kafka/values.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a33a6f21f8529377162704fbabc7a381706ad64e
--- /dev/null
+++ b/execution/infrastructure/kafka/values.yaml
@@ -0,0 +1,174 @@
+## ------------------------------------------------------
+## Zookeeper
+## ------------------------------------------------------
+cp-zookeeper:
+  enabled: true
+  servers: 3
+  image: confluentinc/cp-zookeeper
+  imageTag: 5.4.0
+  ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+  ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  imagePullSecrets:
+  #  - name: "regcred"
+  heapOptions: "-Xms512M -Xmx512M"
+  persistence:
+    enabled: false
+    ## The size of the PersistentVolume to allocate to each Zookeeper Pod in the StatefulSet. For
+    ## production servers this number should likely be much larger.
+    ##
+    ## Size for Data dir, where ZooKeeper will store the in-memory database snapshots.
+    dataDirSize: 10Gi
+    # dataDirStorageClass: ""
+
+    ## Size for the data log dir, a dedicated log device that helps avoid competition between logging and snapshots.
+    dataLogDirSize: 10Gi
+    # dataLogDirStorageClass: ""
+  resources: {}
+  ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
+  ## and remove the curly braces after 'resources:'
+  #  limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  #  requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+## ------------------------------------------------------
+## Kafka
+## ------------------------------------------------------
+cp-kafka:
+  enabled: true
+  brokers: 10
+  image: confluentinc/cp-enterprise-kafka
+  imageTag: 5.4.0
+  ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+  ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  imagePullSecrets:
+  #  - name: "regcred"
+  heapOptions: "-Xms512M -Xmx512M"
+  persistence:
+    enabled: false
+    # storageClass: ""
+    size: 5Gi
+    disksPerBroker: 1
+  resources: {}
+  ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
+  ## and remove the curly braces after 'resources:'
+  #  limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  #  requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+  configurationOverrides:
+    #"offsets.topic.replication.factor": "3"
+    "message.max.bytes": "134217728" # 128 MB
+    "replica.fetch.max.bytes": "134217728" # 128 MB
+    # "default.replication.factor": 3
+    # "min.insync.replicas": 2
+    # "auto.create.topics.enable": false
+    "log.retention.ms": "10000" # 10s
+    "metrics.sample.window.ms": "5000" #5s
+
+## ------------------------------------------------------
+## Schema Registry
+## ------------------------------------------------------
+cp-schema-registry:
+  enabled: true
+  image: confluentinc/cp-schema-registry
+  imageTag: 5.4.0
+  ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+  ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  imagePullSecrets:
+  #  - name: "regcred"
+  heapOptions: "-Xms512M -Xmx512M"
+  resources: {}
+  ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
+  ## and remove the curly braces after 'resources:'
+  #  limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  #  requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+## ------------------------------------------------------
+## REST Proxy
+## ------------------------------------------------------
+cp-kafka-rest:
+  enabled: false
+  image: confluentinc/cp-kafka-rest
+  imageTag: 5.4.0
+  ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+  ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  imagePullSecrets:
+  #  - name: "regcred"
+  heapOptions: "-Xms512M -Xmx512M"
+  resources: {}
+  ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
+  ## and remove the curly braces after 'resources:'
+  #  limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  #  requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+## ------------------------------------------------------
+## Kafka Connect
+## ------------------------------------------------------
+cp-kafka-connect:
+  enabled: false
+  image: confluentinc/cp-kafka-connect
+  imageTag: 5.4.0
+  ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+  ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  imagePullSecrets:
+  #  - name: "regcred"
+  heapOptions: "-Xms512M -Xmx512M"
+  resources: {}
+  ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
+  ## and remove the curly braces after 'resources:'
+  #  limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  #  requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+## ------------------------------------------------------
+## KSQL Server
+## ------------------------------------------------------
+cp-ksql-server:
+  enabled: false
+  image: confluentinc/cp-ksql-server
+  imageTag: 5.4.0
+  ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+  ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  imagePullSecrets:
+  #  - name: "regcred"
+  heapOptions: "-Xms512M -Xmx512M"
+  ksql:
+    headless: false
+
+## ------------------------------------------------------
+## Control Center
+## ------------------------------------------------------
+cp-control-center:
+  enabled: false
+  image: confluentinc/cp-enterprise-control-center
+  imageTag: 5.2.0
+  ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+  ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  imagePullSecrets:
+  #  - name: "regcred"
+  heapOptions: "-Xms512M -Xmx512M"
+  resources: {}
+  ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
+  ## and remove the curly braces after 'resources:'
+  #  limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  #  requests:
+  #   cpu: 100m
+  #   memory: 128Mi
diff --git a/execution/infrastructure/prometheus/cluster-role-binding.yaml b/execution/infrastructure/prometheus/cluster-role-binding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5369e02aac84440053b3be5485f0644419d981d1
--- /dev/null
+++ b/execution/infrastructure/prometheus/cluster-role-binding.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: prometheus
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: prometheus
+subjects:
+- kind: ServiceAccount
+  name: prometheus
+  namespace: titan-scalability
\ No newline at end of file
diff --git a/execution/infrastructure/prometheus/cluster-role.yaml b/execution/infrastructure/prometheus/cluster-role.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..299ebd0a3be0b53adcadc514b2ef6d1d15efc98d
--- /dev/null
+++ b/execution/infrastructure/prometheus/cluster-role.yaml
@@ -0,0 +1,18 @@
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: prometheus
+rules:
+- apiGroups: [""]
+  resources:
+  - nodes
+  - services
+  - endpoints
+  - pods
+  verbs: ["get", "list", "watch"]
+- apiGroups: [""]
+  resources:
+  - configmaps
+  verbs: ["get"]
+- nonResourceURLs: ["/metrics"]
+  verbs: ["get"]
\ No newline at end of file
diff --git a/execution/infrastructure/prometheus/helm-values.yaml b/execution/infrastructure/prometheus/helm-values.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bf503fe483e918ac7a6a7dc8722ea06cfd3aef6c
--- /dev/null
+++ b/execution/infrastructure/prometheus/helm-values.yaml
@@ -0,0 +1,41 @@
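+# Install only the Prometheus Operator from this chart; all bundled exporters and the
+# chart-managed Prometheus are disabled, since a Prometheus instance is deployed
+# separately (see prometheus.yaml).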
+alertmanager:
+  enabled: false
+
+grafana:
+  enabled: false
+
+kubeApiServer:
+  enabled: false
+
+kubelet:
+  enabled: false
+
+kubeControllerManager:
+  enabled: false
+
+coreDns:
+  enabled: false
+
+kubeDns:
+  enabled: false
+ 
+kubeEtcd:
+  enabled: false
+
+kubeScheduler:
+  enabled: false
+
+kubeProxy:
+  enabled: false
+
+kubeStateMetrics:
+  enabled: false
+ 
+nodeExporter:
+  enabled: false
+
+prometheusOperator:
+  enabled: true
+
+prometheus:
+  enabled: false
diff --git a/execution/infrastructure/prometheus/prometheus.yaml b/execution/infrastructure/prometheus/prometheus.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2cec39877386cc06408783c372521025883310da
--- /dev/null
+++ b/execution/infrastructure/prometheus/prometheus.yaml
@@ -0,0 +1,15 @@
+apiVersion: monitoring.coreos.com/v1
+kind: Prometheus
+metadata:
+  name: prometheus
+spec:
+  serviceAccountName: prometheus
+  serviceMonitorSelector:
+    matchLabels:
+      #app: cp-kafka
+      appScope: titan-ccp
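+      # Scrape all ServiceMonitors labeled appScope=titan-ccp, i.e. the Kafka and
+      # kafka-lag-exporter monitors defined in this repository.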
+  resources:
+    requests:
+      memory: 400Mi
+  #scrapeInterval: 1s
+  enableAdminAPI: true
\ No newline at end of file
diff --git a/execution/infrastructure/prometheus/service-account.yaml b/execution/infrastructure/prometheus/service-account.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f671fc5ab75c995b4c172089b57a9c72860d5cb0
--- /dev/null
+++ b/execution/infrastructure/prometheus/service-account.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: prometheus
\ No newline at end of file
diff --git a/execution/lag-trend-graph.ipynb b/execution/lag-trend-graph.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..71cd54ceefbcce4548e118a9dd0ab484df52a207
--- /dev/null
+++ b/execution/lag-trend-graph.ipynb
@@ -0,0 +1,147 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import pandas as pd\n",
+    "import numpy as np\n",
+    "from sklearn.linear_model import LinearRegression\n",
+    "import matplotlib.pyplot as plt\n",
+    "import matplotlib"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "directory = ''\n",
+    "filename = 'xxx_totallag.csv'\n",
+    "warmup_sec = 60\n",
+    "threshold = 2000 #slope"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "df = pd.read_csv(os.path.join(directory, filename))\n",
+    "\n",
+    "input = df.iloc[::3]\n",
+    "#print(input)\n",
+    "input['sec_start'] = input.loc[0:, 'timestamp'] - input.iloc[0]['timestamp']\n",
+    "#print(input)\n",
+    "#print(input.iloc[0, 'timestamp'])\n",
+    "regress = input.loc[input['sec_start'] >= warmup_sec] # Warm-Up\n",
+    "#regress = input\n",
+    "\n",
+    "#input.plot(kind='line',x='timestamp',y='value',color='red')\n",
+    "#plt.show()\n",
+    "\n",
+    "X = regress.iloc[:, 4].values.reshape(-1, 1)  # values converts it into a numpy array\n",
+    "Y = regress.iloc[:, 3].values.reshape(-1, 1)  # -1 means that calculate the dimension of rows, but have 1 column\n",
+    "linear_regressor = LinearRegression()  # create object for the class\n",
+    "linear_regressor.fit(X, Y)  # perform linear regression\n",
+    "Y_pred = linear_regressor.predict(X)  # make predictions"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(linear_regressor.coef_)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plt.style.use('ggplot')\n",
+    "plt.rcParams['axes.facecolor']='w'\n",
+    "plt.rcParams['axes.edgecolor']='555555'\n",
+    "#plt.rcParams['ytick.color']='black'\n",
+    "plt.rcParams['grid.color']='dddddd'\n",
+    "plt.rcParams['axes.spines.top']='false'\n",
+    "plt.rcParams['axes.spines.right']='false'\n",
+    "plt.rcParams['legend.frameon']='true'\n",
+    "plt.rcParams['legend.framealpha']='1'\n",
+    "plt.rcParams['legend.edgecolor']='1'\n",
+    "plt.rcParams['legend.borderpad']='1'\n",
+    "\n",
+    "\n",
+    "#filename = f\"exp{exp_id}_{benchmark}_{dim_value}_{instances}\"\n",
+    "\n",
+    "\n",
+    "t_warmup = input.loc[input['sec_start'] <= warmup_sec].iloc[:, 4].values\n",
+    "y_warmup = input.loc[input['sec_start'] <= warmup_sec].iloc[:, 3].values\n",
+    "\n",
+    "plt.figure()\n",
+    "#plt.figure(figsize=(4, 3))\n",
+    "\n",
+    "plt.plot(X, Y, c=\"#348ABD\", label=\"observed\")\n",
+    "#plt.plot(t_warmup, y_warmup)\n",
+    "\n",
+    "plt.plot(X, Y_pred, c=\"#E24A33\", label=\"trend\") # color='red')\n",
+    "\n",
+    "#348ABD, 7A68A6, A60628, 467821, CF4457, 188487, E24A33\n",
+    "\n",
+    "plt.gca().yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, pos: '%1.0fK' % (x * 1e-3)))\n",
+    "plt.ylabel('queued messages')\n",
+    "plt.xlabel('seconds since start')\n",
+    "plt.legend()\n",
+    "#ax.set_ylim(ymin=0)\n",
+    "#ax.set_xlim(xmin=0)\n",
+    "\n",
+    "plt.savefig(\"plot.pdf\", bbox_inches='tight')\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python",
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "version": "3.7.0-final"
+  },
+  "orig_nbformat": 2,
+  "file_extension": ".py",
+  "mimetype": "text/x-python",
+  "name": "python",
+  "npconvert_exporter": "python",
+  "pygments_lexer": "ipython3",
+  "version": 3,
+  "kernelspec": {
+   "name": "python37064bitvenvvenv469ea2e0a7854dc7b367eee45386afee",
+   "display_name": "Python 3.7.0 64-bit ('.venv': venv)"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
\ No newline at end of file
diff --git a/execution/lag_analysis.py b/execution/lag_analysis.py
index f2690ab13b072171020cd5b27a55d6260b9b9084..c529853fd423babf0783331b02810f4e892af357 100644
--- a/execution/lag_analysis.py
+++ b/execution/lag_analysis.py
@@ -50,6 +50,8 @@ for result in results:
 
 df = pd.DataFrame(d)
 
+# Analyze the lag trend of the input topic
+
 input = df.loc[df['topic'] == "input"]
 
 #input.plot(kind='line',x='timestamp',y='value',color='red')
@@ -83,6 +85,30 @@ plt.savefig(f"{filename}_plot.png")
 df.to_csv(f"{filename}_values.csv")
 
 
+# Load total lag count
+
+response = requests.get('http://kube1.se.internal:32529/api/v1/query_range', params={
+    'query': "sum by(group)(kafka_consumergroup_group_lag > 0)",
+    'start': start.isoformat(),
+    'end': end.isoformat(),
+    'step': '5s'})
+
+results = response.json()['data']['result']
+
+d = []
+
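+# Prometheus range queries return [timestamp, value-string] pairs; 'NaN' samples are mapped to 0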
+for result in results:
+    #print(result['metric']['topic'])
+    group = result['metric']['group']
+    for value in result['values']:
+        #print(value)
+        d.append({'group': group, 'timestamp': int(value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
+
+df = pd.DataFrame(d)
+
+df.to_csv(f"{filename}_totallag.csv")
+
+
 # Load partition count
 
 response = requests.get('http://kube1.se.internal:32529/api/v1/query_range', params={
diff --git a/execution/requirements.txt b/execution/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..17f29b0b16a3f130399612c7bffd3ce12896c946
--- /dev/null
+++ b/execution/requirements.txt
@@ -0,0 +1,62 @@
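+# Pinned Python dependencies for the lag analysis scripts and Jupyter notebooks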
+attrs==19.3.0
+backcall==0.1.0
+bleach==3.1.1
+certifi==2019.11.28
+chardet==3.0.4
+cycler==0.10.0
+decorator==4.4.2
+defusedxml==0.6.0
+entrypoints==0.3
+idna==2.9
+importlib-metadata==1.5.0
+ipykernel==5.1.4
+ipython==7.13.0
+ipython-genutils==0.2.0
+ipywidgets==7.5.1
+jedi==0.16.0
+Jinja2==2.11.1
+joblib==0.14.1
+jsonschema==3.2.0
+jupyter==1.0.0
+jupyter-client==6.0.0
+jupyter-console==6.1.0
+jupyter-core==4.6.3
+kiwisolver==1.1.0
+MarkupSafe==1.1.1
+matplotlib==3.2.0
+mistune==0.8.4
+nbconvert==5.6.1
+nbformat==5.0.4
+notebook==6.0.3
+numpy==1.18.1
+pandas==1.0.1
+pandocfilters==1.4.2
+parso==0.6.2
+pexpect==4.8.0
+pickleshare==0.7.5
+prometheus-client==0.7.1
+prompt-toolkit==3.0.4
+ptyprocess==0.6.0
+Pygments==2.6.1
+pyparsing==2.4.6
+pyrsistent==0.15.7
+python-dateutil==2.8.1
+pytz==2019.3
+pyzmq==19.0.0
+qtconsole==4.7.1
+QtPy==1.9.0
+requests==2.23.0
+scikit-learn==0.22.2.post1
+scipy==1.4.1
+Send2Trash==1.5.0
+six==1.14.0
+sklearn==0.0
+terminado==0.8.3
+testpath==0.4.4
+tornado==6.0.4
+traitlets==4.3.3
+urllib3==1.25.8
+wcwidth==0.1.8
+webencodings==0.5.1
+widgetsnbextension==3.5.1
+zipp==3.1.0
diff --git a/execution/run_loop.sh b/execution/run_loop.sh
index 04664a7cb4a88072ed3d0bca21297ac5b0f757ef..e63c0ecdfc54d27456afd720cc66303bfb143b28 100755
--- a/execution/run_loop.sh
+++ b/execution/run_loop.sh
@@ -3,7 +3,11 @@
 UC=$1
 IFS=', ' read -r -a DIM_VALUES <<< "$2"
 IFS=', ' read -r -a REPLICAS <<< "$3"
-PARTITIONS=$4
+PARTITIONS=${4:-40}
+CPU_LIMIT=${5:-1000m}
+MEMORY_LIMIT=${6:-4Gi}
+KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
+EXECUTION_MINUTES=${8:-5}
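+
+# Example invocation (hypothetical values):
+#   ./run_loop.sh 1 "10000, 50000" "1, 2, 4" 40 1000m 4Gi 100 5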
 
 # Get and increment counter
 EXP_ID=$(cat exp_counter.txt)
@@ -15,6 +19,10 @@ IFS=$', '; echo \
 DIM_VALUES=${DIM_VALUES[*]}
 REPLICAS=${REPLICAS[*]}
 PARTITIONS=$PARTITIONS
+CPU_LIMIT=$CPU_LIMIT
+MEMORY_LIMIT=$MEMORY_LIMIT
+KAFKA_STREAMS_COMMIT_INTERVAL_MS=$KAFKA_STREAMS_COMMIT_INTERVAL_MS
+EXECUTION_MINUTES=$EXECUTION_MINUTES
 " >> "exp${EXP_ID}_uc${UC}_meta.txt"
 
 SUBEXPERIMENTS=$((${#DIM_VALUES[@]} * ${#REPLICAS[@]}))
@@ -27,7 +35,7 @@ do
     do
         SUBEXPERIMENT_COUNTER=$((SUBEXPERIMENT_COUNTER+1))
         echo "Run subexperiment $SUBEXPERIMENT_COUNTER/$SUBEXPERIMENTS with config: $DIM_VALUE $REPLICA"
-        ./run_uc$UC-new.sh $EXP_ID $DIM_VALUE $REPLICA $PARTITIONS
+        ./run_uc$UC-new.sh $EXP_ID $DIM_VALUE $REPLICA $PARTITIONS $CPU_LIMIT $MEMORY_LIMIT $KAFKA_STREAMS_COMMIT_INTERVAL_MS $EXECUTION_MINUTES
         sleep 10s
     done
 done
diff --git a/execution/run_uc1-new.sh b/execution/run_uc1-new.sh
index 540f752b8bca855caef8fc736c5cff05ca6e3b6a..0edb75d002861393ce9a4b1b59c21e5871c651eb 100755
--- a/execution/run_uc1-new.sh
+++ b/execution/run_uc1-new.sh
@@ -3,27 +3,40 @@
 EXP_ID=$1
 DIM_VALUE=$2
 INSTANCES=$3
-PARTITIONS=$4
-EXECUTION_MINUTES=5
+PARTITIONS=${4:-40}
+CPU_LIMIT=${5:-1000m}
+MEMORY_LIMIT=${6:-4Gi}
+KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
+EXECUTION_MINUTES=${8:-5}
 
-# Start up Kafka
-# TODO
+echo "EXP_ID: $EXP_ID"
+echo "DIM_VALUE: $DIM_VALUE"
+echo "INSTANCES: $INSTANCES"
+echo "PARTITIONS: $PARTITIONS"
+echo "CPU_LIMIT: $CPU_LIMIT"
+echo "MEMORY_LIMIT: $MEMORY_LIMIT"
+echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+echo "EXECUTION_MINUTES: $EXECUTION_MINUTES"
 
 # Create Topics
 #PARTITIONS=40
 #kubectl run temp-kafka --rm --attach --restart=Never --image=solsson/kafka --command -- bash -c "./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1"
-echo "Print topics:"
-kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p'
 PARTITIONS=$PARTITIONS
 kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1"
 
 # Start workload generator
 NUM_SENSORS=$DIM_VALUE
-sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g" uc1-workload-generator/deployment.yaml | kubectl apply -f -
+WL_MAX_RECORDS=150000
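+# Ceiling division: enough generator instances so each simulates at most WL_MAX_RECORDS sensors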
+WL_INSTANCES=$(( (NUM_SENSORS + WL_MAX_RECORDS - 1) / WL_MAX_RECORDS ))
+
+WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml)
+echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
 
 # Start application
 REPLICAS=$INSTANCES
-kubectl apply -f uc1-application/aggregation-deployment.yaml
+#kubectl apply -f uc1-application/aggregation-deployment.yaml
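+# Render the resource limits into the manifest once and keep it in a variable,
+# so exactly the same manifest can be deleted at teardown.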
+APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc1-application/aggregation-deployment.yaml)
+echo "$APPLICATION_YAML" | kubectl apply -f -
 kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
 
 # Execute for certain time
@@ -35,8 +48,12 @@ python lag_analysis.py $EXP_ID uc1 $DIM_VALUE $INSTANCES
 deactivate
 
 # Stop wl and app
-kubectl delete -f uc1-workload-generator/deployment.yaml
-kubectl delete -f uc1-application/aggregation-deployment.yaml
+#kubectl delete -f uc1-workload-generator/deployment.yaml
+#sed "s/{{INSTANCES}}/1/g" uc1-workload-generator/deployment.yaml | kubectl delete -f -
+#sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml | kubectl delete -f -
+echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
+#kubectl delete -f uc1-application/aggregation-deployment.yaml
+echo "$APPLICATION_YAML" | kubectl delete -f -
 
 
 # Delete topics instead of Kafka
@@ -49,18 +66,18 @@ kubectl delete -f uc1-application/aggregation-deployment.yaml
 
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
 echo "Finished execution, print topics:"
-#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
-while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
+#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
 do
-    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
+    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|titan-.*'"
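+    # kafka-topics interprets the --topic argument as a regular expression,
+    # so a single call deletes all matching topics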
     echo "Wait for topic deletion"
     sleep 5s
     #echo "Finished waiting, print topics:"
-    #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+    #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
     # Sometimes a second deletion seems to be required
 done
 echo "Finish topic deletion, print topics:"
-#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
 echo "Exiting script"
 
 KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
diff --git a/execution/run_uc2-new.sh b/execution/run_uc2-new.sh
index 6742a7316c0c4ce2cfb506eac979fcc20c0c2374..503c4ffa0d1f7f4d785eb0fb1743fc305fc0732f 100755
--- a/execution/run_uc2-new.sh
+++ b/execution/run_uc2-new.sh
@@ -3,10 +3,20 @@
 EXP_ID=$1
 DIM_VALUE=$2
 INSTANCES=$3
-PARTITIONS=$4
-EXECUTION_MINUTES=5
+PARTITIONS=${4:-40}
+CPU_LIMIT=${5:-1000m}
+MEMORY_LIMIT=${6:-4Gi}
+KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
+EXECUTION_MINUTES=${8:-5}
 
-# Maybe start up Kafka
+echo "EXP_ID: $EXP_ID"
+echo "DIM_VALUE: $DIM_VALUE"
+echo "INSTANCES: $INSTANCES"
+echo "PARTITIONS: $PARTITIONS"
+echo "CPU_LIMIT: $CPU_LIMIT"
+echo "MEMORY_LIMIT: $MEMORY_LIMIT"
+echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+echo "EXECUTION_MINUTES: $EXECUTION_MINUTES"
 
 # Create Topics
 #PARTITIONS=40
@@ -20,7 +30,9 @@ sed "s/{{NUM_NESTED_GROUPS}}/$NUM_NESTED_GROUPS/g" uc2-workload-generator/deploy
 
 # Start application
 REPLICAS=$INSTANCES
-kubectl apply -f uc2-application/aggregation-deployment.yaml
+#kubectl apply -f uc2-application/aggregation-deployment.yaml
+APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc2-application/aggregation-deployment.yaml)
+echo "$APPLICATION_YAML" | kubectl apply -f -
 kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
 
 # Execute for certain time
@@ -33,7 +45,8 @@ deactivate
 
 # Stop wl and app
 kubectl delete -f uc2-workload-generator/deployment.yaml
-kubectl delete -f uc2-application/aggregation-deployment.yaml
+#kubectl delete -f uc2-application/aggregation-deployment.yaml
+echo "$APPLICATION_YAML" | kubectl delete -f -
 
 
 # Delete topics instead of Kafka
@@ -46,18 +59,18 @@ kubectl delete -f uc2-application/aggregation-deployment.yaml
 
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
 echo "Finished execution, print topics:"
-#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
-while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
+#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
 do
-    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
+    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|titan-.*'"
     echo "Wait for topic deletion"
     sleep 5s
     #echo "Finished waiting, print topics:"
-    #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+    #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
     # Sometimes a second deletion seems to be required
 done
 echo "Finish topic deletion, print topics:"
-#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
 echo "Exiting script"
 
 KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
diff --git a/execution/run_uc3-new.sh b/execution/run_uc3-new.sh
index c5c0f9eba070d17a71866eab46768721399a2724..b8c7c20a1600ecf9c78ef3743ae46fb47ae04c17 100755
--- a/execution/run_uc3-new.sh
+++ b/execution/run_uc3-new.sh
@@ -3,10 +3,20 @@
 EXP_ID=$1
 DIM_VALUE=$2
 INSTANCES=$3
-PARTITIONS=$4
-EXECUTION_MINUTES=5
+PARTITIONS=${4:-40}
+CPU_LIMIT=${5:-1000m}
+MEMORY_LIMIT=${6:-4Gi}
+KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
+EXECUTION_MINUTES=${8:-5}
 
-# Maybe start up Kafka
+echo "EXP_ID: $EXP_ID"
+echo "DIM_VALUE: $DIM_VALUE"
+echo "INSTANCES: $INSTANCES"
+echo "PARTITIONS: $PARTITIONS"
+echo "CPU_LIMIT: $CPU_LIMIT"
+echo "MEMORY_LIMIT: $MEMORY_LIMIT"
+echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+echo "EXECUTION_MINUTES: $EXECUTION_MINUTES"
 
 # Create Topics
 #PARTITIONS=40
@@ -16,11 +26,17 @@ kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-z
 
 # Start workload generator
 NUM_SENSORS=$DIM_VALUE
-sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g" uc3-workload-generator/deployment.yaml | kubectl apply -f -
+WL_MAX_RECORDS=150000
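+# Ceiling division: enough generator instances so each simulates at most WL_MAX_RECORDS sensors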
+WL_INSTANCES=$(( (NUM_SENSORS + WL_MAX_RECORDS - 1) / WL_MAX_RECORDS ))
+
+WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc3-workload-generator/deployment.yaml)
+echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
 
 # Start application
 REPLICAS=$INSTANCES
-kubectl apply -f uc3-application/aggregation-deployment.yaml
+#kubectl apply -f uc3-application/aggregation-deployment.yaml
+APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc3-application/aggregation-deployment.yaml)
+echo "$APPLICATION_YAML" | kubectl apply -f -
 kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
 
 # Execute for certain time
@@ -32,8 +48,13 @@ python lag_analysis.py $EXP_ID uc3 $DIM_VALUE $INSTANCES
 deactivate
 
 # Stop wl and app
-kubectl delete -f uc3-workload-generator/deployment.yaml
-kubectl delete -f uc3-application/aggregation-deployment.yaml
+#kubectl delete -f uc3-workload-generator/deployment.yaml
+#sed "s/{{INSTANCES}}/1/g" uc3-workload-generator/deployment.yaml | kubectl delete -f -
+echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
+#kubectl delete -f uc3-application/aggregation-deployment.yaml
+#sed "s/{{CPU_LIMIT}}/1000m/g; s/{{MEMORY_LIMIT}}/4Gi/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/100/g" uc3-application/aggregation-deployment.yaml | kubectl delete -f -
+echo "$APPLICATION_YAML" | kubectl delete -f -
+
 
 
 # Delete topics instead of Kafka
@@ -46,18 +67,18 @@ kubectl delete -f uc3-application/aggregation-deployment.yaml
 
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
 echo "Finished execution, print topics:"
-#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
-while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
+#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
 do
-    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
+    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|titan-.*'"
     echo "Wait for topic deletion"
     sleep 5s
     #echo "Finished waiting, print topics:"
-    #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+    #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
     # Sometimes a second deletion seems to be required
 done
 echo "Finish topic deletion, print topics:"
-#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
 echo "Exiting script"
 
 KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
diff --git a/execution/run_uc4-new.sh b/execution/run_uc4-new.sh
index 607aecfcfc8a7799dd641d9bc8ce105eda523a24..ee3aaae98f151ef22088608ee970d3f8d66989e1 100755
--- a/execution/run_uc4-new.sh
+++ b/execution/run_uc4-new.sh
@@ -3,10 +3,20 @@
 EXP_ID=$1
 DIM_VALUE=$2
 INSTANCES=$3
-PARTITIONS=$4
-EXECUTION_MINUTES=5
+PARTITIONS=${4:-40}
+CPU_LIMIT=${5:-1000m}
+MEMORY_LIMIT=${6:-4Gi}
+KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
+EXECUTION_MINUTES=${8:-5}
 
-# Maybe start up Kafka
+echo "EXP_ID: $EXP_ID"
+echo "DIM_VALUE: $DIM_VALUE"
+echo "INSTANCES: $INSTANCES"
+echo "PARTITIONS: $PARTITIONS"
+echo "CPU_LIMIT: $CPU_LIMIT"
+echo "MEMORY_LIMIT: $MEMORY_LIMIT"
+echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+echo "EXECUTION_MINUTES: $EXECUTION_MINUTES"
 
 # Create Topics
 #PARTITIONS=40
@@ -22,8 +32,10 @@ sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g" uc4-workload-generator/deployment.yaml |
 # Start application
 REPLICAS=$INSTANCES
 #AGGREGATION_DURATION_DAYS=$DIM_VALUE
-kubectl apply -f uc4-application/aggregation-deployment.yaml
+#kubectl apply -f uc4-application/aggregation-deployment.yaml
 #sed "s/{{AGGREGATION_DURATION_DAYS}}/$AGGREGATION_DURATION_DAYS/g" uc4-application/aggregation-deployment.yaml | kubectl apply -f -
+APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc4-application/aggregation-deployment.yaml)
+echo "$APPLICATION_YAML" | kubectl apply -f -
 kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
 
 # Execute for certain time
@@ -36,7 +48,8 @@ deactivate
 
 # Stop wl and app
 kubectl delete -f uc4-workload-generator/deployment.yaml
-kubectl delete -f uc4-application/aggregation-deployment.yaml
+#kubectl delete -f uc4-application/aggregation-deployment.yaml
+echo "$APPLICATION_YAML" | kubectl delete -f -
 
 
 # Delete topics instead of Kafka
@@ -49,24 +62,19 @@ kubectl delete -f uc4-application/aggregation-deployment.yaml
 
 #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
 echo "Finished execution, print topics:"
-#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
-while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
+#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
 do
-    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
+    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|titan-.*'"
     echo "Wait for topic deletion"
     sleep 5s
     #echo "Finished waiting, print topics:"
-    #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+    #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
     # Sometimes a second deletion seems to be required
 done
 echo "Finish topic deletion, print topics:"
-#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
+#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
 echo "Exiting script"
 
-
-#TODO maybe delete schemas
-#https://docs.confluent.io/current/schema-registry/schema-deletion-guidelines.html
-#curl -X DELETE http://localhost:8081/subjects/Kafka-value
-
 KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
 kubectl delete pod $KAFKA_LAG_EXPORTER_POD
diff --git a/execution/scalability-graph-finish.ipynb b/execution/scalability-graph-finish.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..ffcf33b6b044a7f5f354b682a5cafc3c3f42e2f0
--- /dev/null
+++ b/execution/scalability-graph-finish.ipynb
@@ -0,0 +1,104 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import pandas as pd\n",
+    "from functools import reduce\n",
+    "import matplotlib.pyplot as plt"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "directory = '../results-inst'\n",
+    "\n",
+    "experiments = {\n",
+    "    'exp1003': 'exp1003',\n",
+    "    'exp1025': 'exp1025',\n",
+    "}\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "dataframes = [pd.read_csv(os.path.join(directory, f'{v}_min-suitable-instances.csv')).set_index('dim_value').rename(columns={\"instances\": k}) for k, v in experiments.items()]\n",
+    "\n",
+    "df = reduce(lambda df1,df2: df1.join(df2,how='outer'), dataframes)\n",
+    "\n",
+    "df"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "plt.style.use('ggplot')\n",
+    "plt.rcParams['axes.facecolor']='w'\n",
+    "plt.rcParams['axes.edgecolor']='555555'\n",
+    "#plt.rcParams['ytick.color']='black'\n",
+    "plt.rcParams['grid.color']='dddddd'\n",
+    "plt.rcParams['axes.spines.top']='false'\n",
+    "plt.rcParams['axes.spines.right']='false'\n",
+    "plt.rcParams['legend.frameon']='true'\n",
+    "plt.rcParams['legend.framealpha']='1'\n",
+    "plt.rcParams['legend.edgecolor']='1'\n",
+    "plt.rcParams['legend.borderpad']='1'\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "\n",
+    "plt.figure() \n",
+    "ax = df.plot(kind='line', marker='o')\n",
+    "#ax = df.plot(kind='line',x='dim_value', legend=False, use_index=True)\n",
+    "ax.set_ylabel('instances')\n",
+    "ax.set_xlabel('data sources')\n",
+    "ax.set_ylim(ymin=0)\n",
+    "#ax.set_xlim(xmin=0)\n"
+   ]
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python",
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "version": "3.7.0-final"
+  },
+  "orig_nbformat": 2,
+  "file_extension": ".py",
+  "mimetype": "text/x-python",
+  "name": "python",
+  "npconvert_exporter": "python",
+  "pygments_lexer": "ipython3",
+  "version": 3,
+  "kernelspec": {
+   "name": "python37064bitvenvvenv469ea2e0a7854dc7b367eee45386afee",
+   "display_name": "Python 3.7.0 64-bit ('.venv': venv)"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
\ No newline at end of file
diff --git a/execution/scalability-graph.ipynb b/execution/scalability-graph.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..752c0bebc901e756e18d4b11fc0d8ae02cddcf13
--- /dev/null
+++ b/execution/scalability-graph.ipynb
@@ -0,0 +1,293 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "print(\"hello\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import requests\n",
+    "from datetime import datetime, timedelta, timezone\n",
+    "import pandas as pd\n",
+    "from sklearn.linear_model import LinearRegression\n",
+    "import matplotlib.pyplot as plt"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "os.getcwd()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "exp_id = 1003\n",
+    "warmup_sec = 60\n",
+    "warmup_partitions_sec = 120\n",
+    "threshold = 2000 #slope\n",
+    "directory = '../results'\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "tags": [
+     "outputPrepend",
+     "outputPrepend"
+    ]
+   },
+   "outputs": [],
+   "source": [
+    "#exp_id = 35\n",
+    "\n",
+    "#os.chdir(\"./results-final\")\n",
+    "\n",
+    "raw_runs = []\n",
+    "\n",
+    "filenames = [filename for filename in os.listdir(directory) if filename.startswith(f\"exp{exp_id}\") and filename.endswith(\"totallag.csv\")]\n",
+    "for filename in filenames:\n",
+    "    #print(filename)\n",
+    "    run_params = filename[:-4].split(\"_\")\n",
+    "    dim_value = run_params[2]\n",
+    "    instances = run_params[3]\n",
+    "\n",
+    "    df = pd.read_csv(os.path.join(directory, filename))\n",
+    "    #input = df.loc[df['topic'] == \"input\"]\n",
+    "    input = df\n",
+    "    #print(input)\n",
+    "    input['sec_start'] = input.loc[0:, 'timestamp'] - input.iloc[0]['timestamp']\n",
+    "    #print(input)\n",
+    "    #print(input.iloc[0, 'timestamp'])\n",
+    "    regress = input.loc[input['sec_start'] >= warmup_sec] # Warm-Up\n",
+    "    #regress = input\n",
+    "\n",
+    "    #input.plot(kind='line',x='timestamp',y='value',color='red')\n",
+    "    #plt.show()\n",
+    "\n",
+    "    X = regress.iloc[:, 2].values.reshape(-1, 1)  # values converts it into a numpy array\n",
+    "    Y = regress.iloc[:, 3].values.reshape(-1, 1)  # -1 means that calculate the dimension of rows, but have 1 column\n",
+    "    linear_regressor = LinearRegression()  # create object for the class\n",
+    "    linear_regressor.fit(X, Y)  # perform linear regression\n",
+    "    Y_pred = linear_regressor.predict(X)  # make predictions\n",
+    "\n",
+    "    trend_slope = linear_regressor.coef_[0][0]\n",
+    "    #print(linear_regressor.coef_)\n",
+    "\n",
+    "    row = {'dim_value': int(dim_value), 'instances': int(instances), 'trend_slope': trend_slope}\n",
+    "    #print(row)\n",
+    "    raw_runs.append(row)\n",
+    "\n",
+    "lags = pd.DataFrame(raw_runs)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "lags.head()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "raw_partitions = []\n",
+    "\n",
+    "filenames = [filename for filename in os.listdir(directory) if filename.startswith(f\"exp{exp_id}\") and filename.endswith(\"partitions.csv\")]\n",
+    "for filename in filenames:\n",
+    "    #print(filename)\n",
+    "    run_params = filename[:-4].split(\"_\")\n",
+    "    dim_value = run_params[2]\n",
+    "    instances = run_params[3]\n",
+    "\n",
+    "    df = pd.read_csv(os.path.join(directory, filename))\n",
+    "    #input = df.loc[df['topic'] == \"input\"]\n",
+    "    input = df\n",
+    "    #print(input)\n",
+    "    input['sec_start'] = input.loc[0:, 'timestamp'] - input.iloc[0]['timestamp']\n",
+    "    #print(input)\n",
+    "    #print(input.iloc[0, 'timestamp'])\n",
+    "    input = input.loc[input['sec_start'] >= warmup_sec] # Warm-Up\n",
+    "    #regress = input\n",
+    "\n",
+    "    input = input.loc[input['topic'] >= 'input']\n",
+    "    mean = input['value'].mean()\n",
+    "\n",
+    "    #input.plot(kind='line',x='timestamp',y='value',color='red')\n",
+    "    #plt.show()\n",
+    "\n",
+    "\n",
+    "    row = {'dim_value': int(dim_value), 'instances': int(instances), 'partitions': mean}\n",
+    "    #print(row)\n",
+    "    raw_partitions.append(row)\n",
+    "\n",
+    "\n",
+    "partitions = pd.DataFrame(raw_partitions)\n",
+    "\n",
+    "#runs = lags.join(partitions.set_index(['dim_value', 'instances']), on=['dim_value', 'instances'])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "raw_obs_instances = []\n",
+    "\n",
+    "filenames = [filename for filename in os.listdir(directory) if filename.startswith(f\"exp{exp_id}\") and filename.endswith(\"instances.csv\")]\n",
+    "for filename in filenames:\n",
+    "    run_params = filename[:-4].split(\"_\")\n",
+    "    dim_value = run_params[2]\n",
+    "    instances = run_params[3]\n",
+    "\n",
+    "    df = pd.read_csv(os.path.join(directory, filename))\n",
+    "\n",
+    "    if df.empty:\n",
+    "        continue\n",
+    "\n",
+    "    #input = df.loc[df['topic'] == \"input\"]\n",
+    "    input = df\n",
+    "    #print(input)\n",
+    "    input['sec_start'] = input.loc[0:, 'timestamp'] - input.iloc[0]['timestamp']\n",
+    "    #print(input)\n",
+    "    #print(input.iloc[0, 'timestamp'])\n",
+    "    input = input.loc[input['sec_start'] >= warmup_sec] # Warm-Up\n",
+    "    #regress = input\n",
+    "\n",
+    "    #input = input.loc[input['topic'] >= 'input']\n",
+    "    #mean = input['value'].mean()\n",
+    "\n",
+    "    #input.plot(kind='line',x='timestamp',y='value',color='red')\n",
+    "    #plt.show()\n",
+    "\n",
+    "\n",
+    "    #row = {'dim_value': int(dim_value), 'instances': int(instances), 'obs_instances': mean}\n",
+    "    #print(row)\n",
+    "    raw_obs_instances.append(row)\n",
+    "\n",
+    "\n",
+    "obs_instances = pd.DataFrame(raw_obs_instances)\n",
+    "\n",
+    "obs_instances.head()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "runs = lags\n",
+    "#runs = lags.join(partitions.set_index(['dim_value', 'instances']), on=['dim_value', 'instances'])#.join(obs_instances.set_index(['dim_value', 'instances']), on=['dim_value', 'instances'])\n",
+    "\n",
+    "#runs[\"failed\"] = runs.apply(lambda row: (abs(row['instances'] - row['obs_instances']) / row['instances']) > 0.1, axis=1)\n",
+    "\n",
+    "#runs.loc[runs['failed']==True]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#threshold = 1000\n",
+    "\n",
+    "# Set to true if the trend line has a slope less than \n",
+    "runs[\"suitable\"] =  runs.apply(lambda row: row['trend_slope'] < threshold, axis=1)\n",
+    "\n",
+    "runs.columns = runs.columns.str.strip()\n",
+    "runs.sort_values(by=[\"dim_value\", \"instances\"])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "filtered = runs[runs.apply(lambda x: x['suitable'], axis=1)]\n",
+    "\n",
+    "grouped = filtered.groupby(['dim_value'])['instances'].min()\n",
+    "min_suitable_instances = grouped.to_frame().reset_index()\n",
+    "\n",
+    "min_suitable_instances"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "min_suitable_instances.to_csv(f'../results-inst/exp{exp_id}_min-suitable-instances.csv', index=False)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "min_suitable_instances.plot(kind='line',x='dim_value',y='instances')\n",
+    "# min_suitable_instances.plot(kind='line',x='dim_value',y='instances', logy=True)\n",
+    "\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "language_info": {
+   "name": "python",
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "version": "3.7.0-final"
+  },
+  "orig_nbformat": 2,
+  "file_extension": ".py",
+  "mimetype": "text/x-python",
+  "name": "python",
+  "npconvert_exporter": "python",
+  "pygments_lexer": "ipython3",
+  "version": 3,
+  "kernelspec": {
+   "name": "python37064bitvenvvenv469ea2e0a7854dc7b367eee45386afee",
+   "display_name": "Python 3.7.0 64-bit ('.venv': venv)"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
\ No newline at end of file
diff --git a/execution/uc1-application/aggregation-deployment.yaml b/execution/uc1-application/aggregation-deployment.yaml
index cfbc3bcdd85cd3cac605d2251370aec99392b2f3..d5bccca4a72f6a47a855ed8a7ca47fac4a8a19ca 100644
--- a/execution/uc1-application/aggregation-deployment.yaml
+++ b/execution/uc1-application/aggregation-deployment.yaml
@@ -15,7 +15,7 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: uc1-application
-        image: "benediktwetzel/uc1-app:latest"
+        image: "soerenhenning/uc1-app:latest"
         ports:
         - containerPort: 5555
           name: jmx
@@ -23,9 +23,13 @@ spec:
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
         - name: COMMIT_INTERVAL_MS
-          value: "100"
+          value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
         - name: JAVA_OPTS
           value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+        resources:
+          limits:
+            memory: "{{MEMORY_LIMIT}}"
+            cpu: "{{CPU_LIMIT}}"
       - name: prometheus-jmx-exporter
         image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
         command:
diff --git a/execution/uc1-workload-generator/deployment.yaml b/execution/uc1-workload-generator/deployment.yaml
index f82519ebdade4069c11b50ea100c8ddd3ed3cf51..a0fde4bbf9765b2bb56bd36acde430d97169f34b 100644
--- a/execution/uc1-workload-generator/deployment.yaml
+++ b/execution/uc1-workload-generator/deployment.yaml
@@ -1,12 +1,13 @@
 apiVersion: apps/v1
-kind: Deployment
+kind: StatefulSet
 metadata:
   name: titan-ccp-load-generator
 spec:
   selector:
     matchLabels:
       app: titan-ccp-load-generator
-  replicas: 1
+  serviceName: titan-ccp-load-generator
+  replicas: {{INSTANCES}}
   template:
     metadata:
       labels:
@@ -15,10 +16,16 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: workload-generator
-        image: benediktwetzel/uc1-wg:latest 
+        image: soerenhenning/uc1-wg:latest 
         env:
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
         - name: NUM_SENSORS
           value: "{{NUM_SENSORS}}"
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: INSTANCES
+          value: "{{INSTANCES}}"
           
\ No newline at end of file
diff --git a/execution/uc2-application/aggregation-deployment.yaml b/execution/uc2-application/aggregation-deployment.yaml
index 206e2606b165d28fb42b0c6fa7f50b55d6d0d8e5..ce52421731ea5fc044c435ad10adb311e7e7e878 100644
--- a/execution/uc2-application/aggregation-deployment.yaml
+++ b/execution/uc2-application/aggregation-deployment.yaml
@@ -23,9 +23,13 @@ spec:
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
         - name: COMMIT_INTERVAL_MS
-          value: "10"
+          value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
         - name: JAVA_OPTS
           value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+        resources:
+          limits:
+            memory: "{{MEMORY_LIMIT}}"
+            cpu: "{{CPU_LIMIT}}"
       - name: prometheus-jmx-exporter
         image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
         command:
diff --git a/execution/uc3-application/aggregation-deployment.yaml b/execution/uc3-application/aggregation-deployment.yaml
index cce675eb515fb52435f202bcac734c44c7c36453..0f3327af3119df125e3431574e3e406183abc132 100644
--- a/execution/uc3-application/aggregation-deployment.yaml
+++ b/execution/uc3-application/aggregation-deployment.yaml
@@ -25,9 +25,13 @@ spec:
         - name: KAFKA_WINDOW_DURATION_MINUTES
           value: "1"
         - name: COMMIT_INTERVAL_MS
-          value: "100"
+          value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
         - name: JAVA_OPTS
           value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+        resources:
+          limits:
+            memory: "{{MEMORY_LIMIT}}"
+            cpu: "{{CPU_LIMIT}}"
       - name: prometheus-jmx-exporter
         image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
         command:
diff --git a/execution/uc3-workload-generator/deployment.yaml b/execution/uc3-workload-generator/deployment.yaml
index b78653e23d62324d4c88da53c23f75184efaa564..9ecd2b67e757c94221e36edcfcfd43c22782270a 100644
--- a/execution/uc3-workload-generator/deployment.yaml
+++ b/execution/uc3-workload-generator/deployment.yaml
@@ -1,12 +1,13 @@
 apiVersion: apps/v1
-kind: Deployment
+kind: StatefulSet
 metadata:
   name: titan-ccp-load-generator
 spec:
   selector:
     matchLabels:
       app: titan-ccp-load-generator
-  replicas: 1
+  serviceName: titan-ccp-load-generator
+  replicas: {{INSTANCES}}
   template:
     metadata:
       labels:
@@ -15,10 +16,16 @@ spec:
       terminationGracePeriodSeconds: 0
       containers:
       - name: workload-generator
-        image: benediktwetzel/uc3-wg:latest 
+        image: soerenhenning/uc3-wg:latest 
         env:
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
         - name: NUM_SENSORS
           value: "{{NUM_SENSORS}}"
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: INSTANCES
+          value: "{{INSTANCES}}"
           
\ No newline at end of file
diff --git a/execution/uc4-application/aggregation-deployment.yaml b/execution/uc4-application/aggregation-deployment.yaml
index a0d606b0e21b13d4efe04cacd68d9bd5b7dafd65..f7a750c790b6a9eab8453fa91e05176de665104e 100644
--- a/execution/uc4-application/aggregation-deployment.yaml
+++ b/execution/uc4-application/aggregation-deployment.yaml
@@ -23,13 +23,17 @@ spec:
         - name: KAFKA_BOOTSTRAP_SERVERS
           value: "my-confluent-cp-kafka:9092"
         - name: AGGREGATION_DURATION_DAYS
-          value: "7" #AGGREGATION_DURATION_DAYS
+          value: "3" #AGGREGATION_DURATION_DAYS
         - name: AGGREGATION_DURATION_ADVANCE
           value: "1"
         - name: COMMIT_INTERVAL_MS
-          value: "100"
+          value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
         - name: JAVA_OPTS
           value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+        resources:
+          limits:
+            memory: "{{MEMORY_LIMIT}}"
+            cpu: "{{CPU_LIMIT}}"
       - name: prometheus-jmx-exporter
         image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
         command:
diff --git a/uc1-application/build.gradle b/uc1-application/build.gradle
index 3fe8803745e42682cf43d068779e63183d62c792..ec18bbebfae085ea227cd94dd19ed5fe06cfc80d 100644
--- a/uc1-application/build.gradle
+++ b/uc1-application/build.gradle
@@ -1,31 +1 @@
-apply plugin: 'application'
-apply plugin: 'eclipse'
-
-buildscript {
-  repositories {
-    maven {
-      url "https://plugins.gradle.org/m2/"
-    }
-  }
-}
-
-sourceCompatibility = "1.11"
-targetCompatibility = "1.11"
-
-dependencies {
-    compile project(':')
-    
-    compile 'org.slf4j:slf4j-simple:1.6.1'
-
-    // Use JUnit test framework
-    testCompile 'junit:junit:4.12'
-}
-
-mainClassName = "uc1.application.HistoryService"
-
-eclipse {
-    classpath {
-       downloadSources=true
-       downloadJavadoc=true
-    }
-}
\ No newline at end of file
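+// Common build logic is inherited from the root project; only the main class is defined here.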
+mainClassName = "spesb.uc1.application.HistoryService"
diff --git a/uc1-application/src/main/java/uc1/application/ConfigurationKeys.java b/uc1-application/src/main/java/spesb/uc1/application/ConfigurationKeys.java
similarity index 94%
rename from uc1-application/src/main/java/uc1/application/ConfigurationKeys.java
rename to uc1-application/src/main/java/spesb/uc1/application/ConfigurationKeys.java
index 27bf70b96364fd58bd8a8df59af6e8f38fcc9b29..7a275cb33a4cd35d228d8ca33ebb7303b251271b 100644
--- a/uc1-application/src/main/java/uc1/application/ConfigurationKeys.java
+++ b/uc1-application/src/main/java/spesb/uc1/application/ConfigurationKeys.java
@@ -1,4 +1,4 @@
-package uc1.application;
+package spesb.uc1.application;
 
 /**
  * Keys to access configuration parameters.
diff --git a/uc1-application/src/main/java/uc1/application/HistoryService.java b/uc1-application/src/main/java/spesb/uc1/application/HistoryService.java
similarity index 94%
rename from uc1-application/src/main/java/uc1/application/HistoryService.java
rename to uc1-application/src/main/java/spesb/uc1/application/HistoryService.java
index 2c7504ad44cf19a513302f222b53ec69d572c54a..18a39da7229d961249be900eeeff679e267a1eef 100644
--- a/uc1-application/src/main/java/uc1/application/HistoryService.java
+++ b/uc1-application/src/main/java/spesb/uc1/application/HistoryService.java
@@ -1,10 +1,10 @@
-package uc1.application;
+package spesb.uc1.application;
 
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
+import spesb.uc1.streamprocessing.KafkaStreamsBuilder;
 import titan.ccp.common.configuration.Configurations;
-import uc1.streamprocessing.KafkaStreamsBuilder;
 
 /**
  * A microservice that manages the history and, therefore, stores and aggregates incoming
diff --git a/uc1-application/src/main/java/uc1/streamprocessing/KafkaStreamsBuilder.java b/uc1-application/src/main/java/spesb/uc1/streamprocessing/KafkaStreamsBuilder.java
similarity index 98%
rename from uc1-application/src/main/java/uc1/streamprocessing/KafkaStreamsBuilder.java
rename to uc1-application/src/main/java/spesb/uc1/streamprocessing/KafkaStreamsBuilder.java
index 22048d6fa337ac3016e8a65502285169018911c4..4b7f487c8e848f0b1d6d652b7d86a8c50c202af1 100644
--- a/uc1-application/src/main/java/uc1/streamprocessing/KafkaStreamsBuilder.java
+++ b/uc1-application/src/main/java/spesb/uc1/streamprocessing/KafkaStreamsBuilder.java
@@ -1,4 +1,4 @@
-package uc1.streamprocessing;
+package spesb.uc1.streamprocessing;
 
 import java.util.Objects;
 import java.util.Properties;
diff --git a/uc1-application/src/main/java/uc1/streamprocessing/TopologyBuilder.java b/uc1-application/src/main/java/spesb/uc1/streamprocessing/TopologyBuilder.java
similarity index 97%
rename from uc1-application/src/main/java/uc1/streamprocessing/TopologyBuilder.java
rename to uc1-application/src/main/java/spesb/uc1/streamprocessing/TopologyBuilder.java
index 1f112858a2153cfb0130379abe763c393520c271..279b70d0b7311f2b45b986e54cdf5b6c81c28263 100644
--- a/uc1-application/src/main/java/uc1/streamprocessing/TopologyBuilder.java
+++ b/uc1-application/src/main/java/spesb/uc1/streamprocessing/TopologyBuilder.java
@@ -1,4 +1,4 @@
-package uc1.streamprocessing;
+package spesb.uc1.streamprocessing;
 
 import com.google.gson.Gson;
 import org.apache.kafka.common.serialization.Serdes;
diff --git a/uc1-workload-generator/build.gradle b/uc1-workload-generator/build.gradle
index 824566a24158d5f535e0dec8ef948903738c9100..d934bd09de1d64cadac982669d7cab5b564f0dd5 100644
--- a/uc1-workload-generator/build.gradle
+++ b/uc1-workload-generator/build.gradle
@@ -1,31 +1 @@
-apply plugin: 'application'
-apply plugin: 'eclipse'
-
-buildscript {
-  repositories {
-    maven {
-      url "https://plugins.gradle.org/m2/"
-    }
-  }
-}
-
-sourceCompatibility = "1.11"
-targetCompatibility = "1.11"
-
-dependencies {
-    compile project(':')
-    
-    compile 'org.slf4j:slf4j-simple:1.6.1'
-
-    // Use JUnit test framework
-    testCompile 'junit:junit:4.12'
-}
-
-mainClassName = "uc1.workloadGenerator.LoadGenerator"
-
-eclipse {
-    classpath {
-       downloadSources=true
-       downloadJavadoc=true
-    }
-}
\ No newline at end of file
+mainClassName = "spesb.uc1.workloadgenerator.LoadGenerator"
diff --git a/uc1-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java b/uc1-workload-generator/src/main/java/spesb/kafkasender/KafkaRecordSender.java
similarity index 99%
rename from uc1-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
rename to uc1-workload-generator/src/main/java/spesb/kafkasender/KafkaRecordSender.java
index 6c67cf722b4dce87f0bc197ba80f8f117f82198e..034201411a84d3769dbe8c02a210098c62dca881 100644
--- a/uc1-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
+++ b/uc1-workload-generator/src/main/java/spesb/kafkasender/KafkaRecordSender.java
@@ -1,4 +1,4 @@
-package kafkaSender;
+package spesb.kafkasender;
 
 import java.util.Properties;
 import java.util.function.Function;
diff --git a/uc1-workload-generator/src/main/java/spesb/uc1/workloadgenerator/LoadGenerator.java b/uc1-workload-generator/src/main/java/spesb/uc1/workloadgenerator/LoadGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..9eb95f0c104ee3a5cd497f735f839cdb474af6a9
--- /dev/null
+++ b/uc1-workload-generator/src/main/java/spesb/uc1/workloadgenerator/LoadGenerator.java
@@ -0,0 +1,92 @@
+package spesb.uc1.workloadgenerator;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import spesb.kafkasender.KafkaRecordSender;
+import titan.ccp.models.records.ActivePowerRecord;
+
+public class LoadGenerator {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class);
+
+  private static final int WL_MAX_RECORDS = 150_000;
+
+  public static void main(final String[] args) throws InterruptedException, IOException {
+    LOGGER.info("Start workload generator for use case UC1.");
+
+    final int numSensors =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10"));
+    final int instanceId = getInstanceId();
+    final int periodMs =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
+    final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+    final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
+    final String kafkaBootstrapServers =
+        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
+    final String kafkaInputTopic =
+        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
+    final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
+    final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
+    final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+
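+    // Partition the sensor ID space across instances; each instance generates at most WL_MAX_RECORDS sensors.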
+    final int idStart = instanceId * WL_MAX_RECORDS;
+    final int idEnd = Math.min((instanceId + 1) * WL_MAX_RECORDS, numSensors);
+    LOGGER.info("Generating data for sensors with IDs from {} to {} (exclusive).", idStart, idEnd);
+    final List<String> sensors = IntStream.range(idStart, idEnd)
+        .mapToObj(i -> "s_" + i)
+        .collect(Collectors.toList());
+
+    final Properties kafkaProperties = new Properties();
+    // kafkaProperties.put("acks", this.acknowledges);
+    kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
+    kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
+    kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
+    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(
+        kafkaBootstrapServers,
+        kafkaInputTopic,
+        r -> r.getIdentifier(),
+        r -> r.getTimestamp(),
+        kafkaProperties);
+
+    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
+    final Random random = new Random();
+
+    for (final String sensor : sensors) {
+      final int initialDelay = random.nextInt(periodMs);
+      executor.scheduleAtFixedRate(() -> {
+        kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
+      }, initialDelay, periodMs, TimeUnit.MILLISECONDS);
+    }
+
+    System.out.println("Wait for termination...");
+    executor.awaitTermination(30, TimeUnit.DAYS);
+    System.out.println("Will terminate now");
+
+  }
+
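+  // Derives the instance ID from the pod name's trailing ordinal (e.g. "titan-ccp-load-generator-2" -> 2); 0 if POD_NAME is unset.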
+  private static int getInstanceId() {
+    final String podName = System.getenv("POD_NAME");
+    if (podName == null) {
+      return 0;
+    } else {
+      return Pattern.compile("-")
+          .splitAsStream(podName)
+          .reduce((p, x) -> x)
+          .map(Integer::parseInt)
+          .orElse(0);
+    }
+  }
+
+}
diff --git a/uc1-workload-generator/src/main/java/uc1/workloadGenerator/ConfigPublisher.java b/uc1-workload-generator/src/main/java/uc1/workloadGenerator/ConfigPublisher.java
deleted file mode 100644
index d0201b4dadeb8955c6505f95f2d6d333d427bd5b..0000000000000000000000000000000000000000
--- a/uc1-workload-generator/src/main/java/uc1/workloadGenerator/ConfigPublisher.java
+++ /dev/null
@@ -1,48 +0,0 @@
-package uc1.workloadGenerator;
-
-import java.util.Properties;
-import java.util.concurrent.ExecutionException;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.configuration.events.EventSerde;
-
-public class ConfigPublisher {
-
-	private final String topic;
-
-	private final Producer<Event, String> producer;
-
-	public ConfigPublisher(final String bootstrapServers, final String topic) {
-		this(bootstrapServers, topic, new Properties());
-	}
-
-	public ConfigPublisher(final String bootstrapServers, final String topic, final Properties defaultProperties) {
-		this.topic = topic;
-
-		final Properties properties = new Properties();
-		properties.putAll(defaultProperties);
-		properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-		properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "134217728"); // 128 MB
-		properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "134217728"); // 128 MB
-
-		this.producer = new KafkaProducer<>(properties, EventSerde.serializer(), new StringSerializer());
-	}
-
-	public void publish(final Event event, final String value) {
-		final ProducerRecord<Event, String> record = new ProducerRecord<>(this.topic, event, value);
-		try {
-			this.producer.send(record).get();
-		} catch (InterruptedException | ExecutionException e) {
-			throw new IllegalArgumentException(e);
-		}
-	}
-
-	public void close() {
-		this.producer.close();
-	}
-
-}
diff --git a/uc1-workload-generator/src/main/java/uc1/workloadGenerator/LoadGenerator.java b/uc1-workload-generator/src/main/java/uc1/workloadGenerator/LoadGenerator.java
deleted file mode 100644
index f75f8018b3f32cfe003b09c0f2481fdd56dc8be3..0000000000000000000000000000000000000000
--- a/uc1-workload-generator/src/main/java/uc1/workloadGenerator/LoadGenerator.java
+++ /dev/null
@@ -1,87 +0,0 @@
-package uc1.workloadGenerator;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-import kafkaSender.KafkaRecordSender;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-
-public class LoadGenerator {
-
-	public static void main(final String[] args) throws InterruptedException, IOException {
-		// uc1
-
-		final int numSensor = Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10"));
-		final int periodMs = Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-		final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-		final boolean sendRegistry = Boolean
-				.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-		final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-		final String kafkaBootstrapServers = Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
-				"localhost:9092");
-		final String kafkaInputTopic = Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-		final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
-		final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
-		final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
-
-		// create sensorRegistry
-		final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-		addChildrens(sensorRegistry.getTopLevelSensor(), numSensor, 0);
-
-		final List<String> sensors = sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-				.collect(Collectors.toList());
-
-		// TODO Brauchen wir das ?
-		if (sendRegistry) {
-			final ConfigPublisher configPublisher = new ConfigPublisher(kafkaBootstrapServers, "configuration");
-			configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-			configPublisher.close();
-			System.out.println("Configuration sent.");
-
-			System.out.println("Now wait 30 seconds");
-			Thread.sleep(30_000);
-			System.out.println("And woke up again :)");
-		}
-
-		final Properties kafkaProperties = new Properties();
-		// kafkaProperties.put("acks", this.acknowledges);
-		kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
-		kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
-		kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-		final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(kafkaBootstrapServers,
-				kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
-
-		final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-		final Random random = new Random();
-
-		for (final String sensor : sensors) {
-			final int initialDelay = random.nextInt(periodMs);
-			executor.scheduleAtFixedRate(() -> {
-				kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
-			}, initialDelay, periodMs, TimeUnit.MILLISECONDS);
-		}
-
-		System.out.println("Wait for termination...");
-		executor.awaitTermination(30, TimeUnit.DAYS);
-		System.out.println("Will terminate now");
-
-	}
-
-	private static void addChildrens(final MutableAggregatedSensor parent, final int numChildren, int nextId) {
-		for (int c = 0; c < numChildren; c++) {
-			parent.addChildMachineSensor("s_" + nextId);
-			nextId++;
-		}
-	}
-
-}
diff --git a/uc1-workload-generator/src/main/java/uc1/workloadGenerator/LoadGeneratorExtrem.java b/uc1-workload-generator/src/main/java/uc1/workloadGenerator/LoadGeneratorExtrem.java
deleted file mode 100644
index 1670778fd0136f7f1386390776384faeb8594712..0000000000000000000000000000000000000000
--- a/uc1-workload-generator/src/main/java/uc1/workloadGenerator/LoadGeneratorExtrem.java
+++ /dev/null
@@ -1,147 +0,0 @@
-package uc1.workloadGenerator;
-
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadMXBean;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import kafkaSender.KafkaRecordSender;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.model.sensorregistry.SensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-
-public class LoadGeneratorExtrem {
-
-	public static void main(final String[] args) throws InterruptedException, IOException {
-
-		final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
-		final int numNestedGroups = Integer
-				.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
-		final int numSensor = Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
-		final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-		final boolean sendRegistry = Boolean
-				.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-		final boolean doNothing = Boolean
-				.parseBoolean(Objects.requireNonNullElse(System.getenv("DO_NOTHING"), "false"));
-		final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-		final int producers = Integer.parseInt(Objects.requireNonNullElse(System.getenv("PRODUCERS"), "1"));
-		final String kafkaBootstrapServers = Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
-				"localhost:9092");
-		final String kafkaInputTopic = Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-		final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
-		final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
-		final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
-
-		final SensorRegistry sensorRegistry = buildSensorRegistry(hierarchy, numNestedGroups, numSensor);
-
-		if (sendRegistry) {
-			final ConfigPublisher configPublisher = new ConfigPublisher(kafkaBootstrapServers, "configuration");
-			configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-			configPublisher.close();
-			System.out.println("Configuration sent.");
-
-			System.out.println("Now wait 30 seconds");
-			Thread.sleep(30_000);
-			System.out.println("And woke up again :)");
-		}
-
-		final Properties kafkaProperties = new Properties();
-		// kafkaProperties.put("acks", this.acknowledges);
-		kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
-		kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
-		kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-		final List<KafkaRecordSender<ActivePowerRecord>> kafkaRecordSenders = Stream
-				.<KafkaRecordSender<ActivePowerRecord>>generate(() -> new KafkaRecordSender<>(kafkaBootstrapServers,
-						kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties))
-				.limit(producers).collect(Collectors.toList());
-
-		final List<String> sensors = sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-				.collect(Collectors.toList());
-
-		for (int i = 0; i < threads; i++) {
-			final int threadId = i;
-			new Thread(() -> {
-				while (true) {
-					for (final String sensor : sensors) {
-						if (!doNothing) {
-							kafkaRecordSenders.get(threadId % producers)
-									.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
-						}
-					}
-				}
-			}).start();
-		}
-
-		while (true) {
-			printCpuUsagePerThread();
-		}
-
-		// System.out.println("Wait for termination...");
-		// Thread.sleep(30 * 24 * 60 * 60 * 1000L);
-		// System.out.println("Will terminate now");
-	}
-
-	private static void printCpuUsagePerThread() throws InterruptedException {
-		final ThreadMXBean tmxb = ManagementFactory.getThreadMXBean();
-		final List<Thread> threads = new ArrayList<>(Thread.getAllStackTraces().keySet());
-
-		final long start = System.nanoTime();
-		final long[] startCpuTimes = new long[threads.size()];
-		for (int i = 0; i < threads.size(); i++) {
-			final Thread thread = threads.get(i);
-			startCpuTimes[i] = tmxb.getThreadCpuTime(thread.getId());
-		}
-
-		Thread.sleep(5000);
-
-		for (int i = 0; i < threads.size(); i++) {
-			final Thread thread = threads.get(i);
-			final long cpuTime = tmxb.getThreadCpuTime(thread.getId()) - startCpuTimes[i];
-			final long dur = System.nanoTime() - start;
-			final double util = (double) cpuTime / dur;
-			System.out.println("Thread " + thread.getName() + ": " + String.format(java.util.Locale.US, "%.4f", util));
-		}
-	}
-
-	private static SensorRegistry buildSensorRegistry(final String hierarchy, final int numNestedGroups,
-			final int numSensor) {
-		final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-		if (hierarchy.equals("deep")) {
-			MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-			for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-				lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-			}
-			for (int s = 0; s < numSensor; s++) {
-				lastSensor.addChildMachineSensor("sensor_" + s);
-			}
-		} else if (hierarchy.equals("full")) {
-			addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-		} else {
-			throw new IllegalStateException();
-		}
-		return sensorRegistry;
-	}
-
-	private static int addChildren(final MutableAggregatedSensor parent, final int numChildren, final int lvl,
-			final int maxLvl, int nextId) {
-		for (int c = 0; c < numChildren; c++) {
-			if (lvl == maxLvl) {
-				parent.addChildMachineSensor("s_" + nextId);
-				nextId++;
-			} else {
-				final MutableAggregatedSensor newParent = parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
-				nextId++;
-				nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
-			}
-		}
-		return nextId;
-	}
-
-}
diff --git a/uc2-application/build.gradle b/uc2-application/build.gradle
index 42bc3e0770db50e93bf2a08d5c039677489c4492..90f54fc6110ac88ef7d0d80ae8ec60c6087ce808 100644
--- a/uc2-application/build.gradle
+++ b/uc2-application/build.gradle
@@ -1,31 +1 @@
-apply plugin: 'application'
-apply plugin: 'eclipse'
-
-buildscript {
-  repositories {
-    maven {
-      url "https://plugins.gradle.org/m2/"
-    }
-  }
-}
-
-sourceCompatibility = "1.11"
-targetCompatibility = "1.11"
-
-dependencies {
-    compile project(':')
-    
-    compile 'org.slf4j:slf4j-simple:1.6.1'
-
-    // Use JUnit test framework
-    testCompile 'junit:junit:4.12'
-}
-
-mainClassName = "uc2.application.AggregationService"
-
-eclipse {
-    classpath {
-       downloadSources=true
-       downloadJavadoc=true
-    }
-}
\ No newline at end of file
+mainClassName = "spesb.uc2.application.AggregationService"
diff --git a/uc2-application/src/main/java/uc2/application/AggregationService.java b/uc2-application/src/main/java/spesb/uc2/application/AggregationService.java
similarity index 95%
rename from uc2-application/src/main/java/uc2/application/AggregationService.java
rename to uc2-application/src/main/java/spesb/uc2/application/AggregationService.java
index 696b13f4889a988282467aca3e4241938e636d7c..79d8c94c75ede32d92485d4b3c49d716ae19ccf8 100644
--- a/uc2-application/src/main/java/uc2/application/AggregationService.java
+++ b/uc2-application/src/main/java/spesb/uc2/application/AggregationService.java
@@ -1,11 +1,11 @@
-package uc2.application;
+package spesb.uc2.application;
 
 import java.time.Duration;
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
+import spesb.uc2.streamprocessing.KafkaStreamsBuilder;
 import titan.ccp.common.configuration.Configurations;
-import uc2.streamprocessing.KafkaStreamsBuilder;
 
 /**
  * A microservice that manages the history and, therefore, stores and aggregates
diff --git a/uc2-application/src/main/java/uc2/application/ConfigurationKeys.java b/uc2-application/src/main/java/spesb/uc2/application/ConfigurationKeys.java
similarity index 96%
rename from uc2-application/src/main/java/uc2/application/ConfigurationKeys.java
rename to uc2-application/src/main/java/spesb/uc2/application/ConfigurationKeys.java
index 08d5e1eb26535b91462a2954e57037f20e3d62e9..ec3bb14be72a1032fa2dfd49cdd6d3c0cb0b18e6 100644
--- a/uc2-application/src/main/java/uc2/application/ConfigurationKeys.java
+++ b/uc2-application/src/main/java/spesb/uc2/application/ConfigurationKeys.java
@@ -1,4 +1,4 @@
-package uc2.application;
+package spesb.uc2.application;
 
 /**
  * Keys to access configuration parameters.
diff --git a/uc2-application/src/main/java/uc2/streamprocessing/ChildParentsTransformer.java b/uc2-application/src/main/java/spesb/uc2/streamprocessing/ChildParentsTransformer.java
similarity index 99%
rename from uc2-application/src/main/java/uc2/streamprocessing/ChildParentsTransformer.java
rename to uc2-application/src/main/java/spesb/uc2/streamprocessing/ChildParentsTransformer.java
index 4315aad5bc211d9342ee1703ead357d0786a2e0e..82217b30a539a9f722c3f27777000fb1d7d6e97c 100644
--- a/uc2-application/src/main/java/uc2/streamprocessing/ChildParentsTransformer.java
+++ b/uc2-application/src/main/java/spesb/uc2/streamprocessing/ChildParentsTransformer.java
@@ -1,4 +1,4 @@
-package uc2.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import java.util.Map;
 import java.util.Optional;
diff --git a/uc2-application/src/main/java/uc2/streamprocessing/ChildParentsTransformerFactory.java b/uc2-application/src/main/java/spesb/uc2/streamprocessing/ChildParentsTransformerFactory.java
similarity index 97%
rename from uc2-application/src/main/java/uc2/streamprocessing/ChildParentsTransformerFactory.java
rename to uc2-application/src/main/java/spesb/uc2/streamprocessing/ChildParentsTransformerFactory.java
index 5029c02446b0b191edf0cc498165465d30516504..6cf2d2c6f3facc96f76148e874244cbe895a8596 100644
--- a/uc2-application/src/main/java/uc2/streamprocessing/ChildParentsTransformerFactory.java
+++ b/uc2-application/src/main/java/spesb/uc2/streamprocessing/ChildParentsTransformerFactory.java
@@ -1,4 +1,4 @@
-package uc2.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import java.util.Map;
 import java.util.Optional;
diff --git a/uc2-application/src/main/java/uc2/streamprocessing/JointFlatTransformer.java b/uc2-application/src/main/java/spesb/uc2/streamprocessing/JointFlatTransformer.java
similarity index 98%
rename from uc2-application/src/main/java/uc2/streamprocessing/JointFlatTransformer.java
rename to uc2-application/src/main/java/spesb/uc2/streamprocessing/JointFlatTransformer.java
index 87a1d9967295995ce5dc46e0f1a9f5f52ffae469..27857fa4505d679f841d2bae639506cb8eeb0845 100644
--- a/uc2-application/src/main/java/uc2/streamprocessing/JointFlatTransformer.java
+++ b/uc2-application/src/main/java/spesb/uc2/streamprocessing/JointFlatTransformer.java
@@ -1,4 +1,4 @@
-package uc2.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import com.google.common.base.MoreObjects;
 import java.util.ArrayList;
diff --git a/uc2-application/src/main/java/uc2/streamprocessing/JointFlatTransformerFactory.java b/uc2-application/src/main/java/spesb/uc2/streamprocessing/JointFlatTransformerFactory.java
similarity index 97%
rename from uc2-application/src/main/java/uc2/streamprocessing/JointFlatTransformerFactory.java
rename to uc2-application/src/main/java/spesb/uc2/streamprocessing/JointFlatTransformerFactory.java
index 5ddb07850e4c14418b9014c8a240c677cb548259..44c99b1f50475c3bd1051322001877e98bca9b68 100644
--- a/uc2-application/src/main/java/uc2/streamprocessing/JointFlatTransformerFactory.java
+++ b/uc2-application/src/main/java/spesb/uc2/streamprocessing/JointFlatTransformerFactory.java
@@ -1,4 +1,4 @@
-package uc2.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import java.util.Map;
 import java.util.Set;
diff --git a/uc2-application/src/main/java/uc2/streamprocessing/JointRecordParents.java b/uc2-application/src/main/java/spesb/uc2/streamprocessing/JointRecordParents.java
similarity index 94%
rename from uc2-application/src/main/java/uc2/streamprocessing/JointRecordParents.java
rename to uc2-application/src/main/java/spesb/uc2/streamprocessing/JointRecordParents.java
index 74fb5441f9a716af4ddd279b4b5fff0466697a23..64de26d996d4b87b3942491a36607a2b09bf43f0 100644
--- a/uc2-application/src/main/java/uc2/streamprocessing/JointRecordParents.java
+++ b/uc2-application/src/main/java/spesb/uc2/streamprocessing/JointRecordParents.java
@@ -1,4 +1,4 @@
-package uc2.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import java.util.Set;
 import titan.ccp.models.records.ActivePowerRecord;
diff --git a/uc2-application/src/main/java/uc2/streamprocessing/KafkaStreamsBuilder.java b/uc2-application/src/main/java/spesb/uc2/streamprocessing/KafkaStreamsBuilder.java
similarity index 99%
rename from uc2-application/src/main/java/uc2/streamprocessing/KafkaStreamsBuilder.java
rename to uc2-application/src/main/java/spesb/uc2/streamprocessing/KafkaStreamsBuilder.java
index eb0643d63f934e7966bca74a7ff7356b2aefb259..9b43f5e66fb4336602c026df8941d5545f39bfb4 100644
--- a/uc2-application/src/main/java/uc2/streamprocessing/KafkaStreamsBuilder.java
+++ b/uc2-application/src/main/java/spesb/uc2/streamprocessing/KafkaStreamsBuilder.java
@@ -1,4 +1,4 @@
-package uc2.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import java.time.Duration;
 import java.util.Objects;
diff --git a/uc2-application/src/main/java/uc2/streamprocessing/OptionalParentsSerde.java b/uc2-application/src/main/java/spesb/uc2/streamprocessing/OptionalParentsSerde.java
similarity index 97%
rename from uc2-application/src/main/java/uc2/streamprocessing/OptionalParentsSerde.java
rename to uc2-application/src/main/java/spesb/uc2/streamprocessing/OptionalParentsSerde.java
index e4624d9531fc476d707d1b712dddb553a69b3823..5e31a55406a321d393098633856d9b2776768676 100644
--- a/uc2-application/src/main/java/uc2/streamprocessing/OptionalParentsSerde.java
+++ b/uc2-application/src/main/java/spesb/uc2/streamprocessing/OptionalParentsSerde.java
@@ -1,4 +1,4 @@
-package uc2.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import java.util.HashSet;
 import java.util.Optional;
diff --git a/uc2-application/src/main/java/uc2/streamprocessing/ParentsSerde.java b/uc2-application/src/main/java/spesb/uc2/streamprocessing/ParentsSerde.java
similarity index 96%
rename from uc2-application/src/main/java/uc2/streamprocessing/ParentsSerde.java
rename to uc2-application/src/main/java/spesb/uc2/streamprocessing/ParentsSerde.java
index 327f33a10b6450c6d16d155314bff76aa18913d9..4385d3bfb9360755fbfa13217abcb95f786ebd39 100644
--- a/uc2-application/src/main/java/uc2/streamprocessing/ParentsSerde.java
+++ b/uc2-application/src/main/java/spesb/uc2/streamprocessing/ParentsSerde.java
@@ -1,4 +1,4 @@
-package uc2.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import java.util.HashSet;
 import java.util.Set;
diff --git a/uc2-application/src/main/java/uc2/streamprocessing/RecordAggregator.java b/uc2-application/src/main/java/spesb/uc2/streamprocessing/RecordAggregator.java
similarity index 97%
rename from uc2-application/src/main/java/uc2/streamprocessing/RecordAggregator.java
rename to uc2-application/src/main/java/spesb/uc2/streamprocessing/RecordAggregator.java
index 0b3e23462ccd61bdd71b485de62c28e89168374a..6951d49c94c8b14d4463fcfdd6274a0b1cf965f7 100644
--- a/uc2-application/src/main/java/uc2/streamprocessing/RecordAggregator.java
+++ b/uc2-application/src/main/java/spesb/uc2/streamprocessing/RecordAggregator.java
@@ -1,4 +1,4 @@
-package uc2.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import org.apache.kafka.streams.kstream.Windowed;
 import titan.ccp.models.records.ActivePowerRecord;
diff --git a/uc2-application/src/main/java/uc2/streamprocessing/SensorParentKey.java b/uc2-application/src/main/java/spesb/uc2/streamprocessing/SensorParentKey.java
similarity index 94%
rename from uc2-application/src/main/java/uc2/streamprocessing/SensorParentKey.java
rename to uc2-application/src/main/java/spesb/uc2/streamprocessing/SensorParentKey.java
index 4cb3bc9c6ec31a6ee086adffb4db188e348c040f..390ecf0e381435197cf7e741a0e306f3dcca3f2c 100644
--- a/uc2-application/src/main/java/uc2/streamprocessing/SensorParentKey.java
+++ b/uc2-application/src/main/java/spesb/uc2/streamprocessing/SensorParentKey.java
@@ -1,4 +1,4 @@
-package uc2.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 /**
  * A key consisting of the identifier of a sensor and an identifier of parent sensor.
diff --git a/uc2-application/src/main/java/uc2/streamprocessing/SensorParentKeySerde.java b/uc2-application/src/main/java/spesb/uc2/streamprocessing/SensorParentKeySerde.java
similarity index 96%
rename from uc2-application/src/main/java/uc2/streamprocessing/SensorParentKeySerde.java
rename to uc2-application/src/main/java/spesb/uc2/streamprocessing/SensorParentKeySerde.java
index 1a2688c2bac2dc3e69d786c6ff395106f0a0f58c..7021c0832db2af6836f53ee6ba70851514443759 100644
--- a/uc2-application/src/main/java/uc2/streamprocessing/SensorParentKeySerde.java
+++ b/uc2-application/src/main/java/spesb/uc2/streamprocessing/SensorParentKeySerde.java
@@ -1,4 +1,4 @@
-package uc2.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import org.apache.kafka.common.serialization.Serde;
 import titan.ccp.common.kafka.simpleserdes.BufferSerde;
diff --git a/uc2-application/src/main/java/uc2/streamprocessing/TopologyBuilder.java b/uc2-application/src/main/java/spesb/uc2/streamprocessing/TopologyBuilder.java
similarity index 99%
rename from uc2-application/src/main/java/uc2/streamprocessing/TopologyBuilder.java
rename to uc2-application/src/main/java/spesb/uc2/streamprocessing/TopologyBuilder.java
index a6b377b0ead972c89c58d405279a571f545ae91b..c83de4efd43688a8b9669f5d0f3dea3bbf70f48b 100644
--- a/uc2-application/src/main/java/uc2/streamprocessing/TopologyBuilder.java
+++ b/uc2-application/src/main/java/spesb/uc2/streamprocessing/TopologyBuilder.java
@@ -1,4 +1,4 @@
-package uc2.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import com.google.common.math.StatsAccumulator;
 import java.time.Duration;
diff --git a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/OptionalParentsSerdeTest.java b/uc2-application/src/test/java/spesb/uc2/streamprocessing/OptionalParentsSerdeTest.java
similarity index 89%
rename from uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/OptionalParentsSerdeTest.java
rename to uc2-application/src/test/java/spesb/uc2/streamprocessing/OptionalParentsSerdeTest.java
index f92af2b5a908f8c4efb8ec02a00c62b9925cb41f..dc9f7e20e60564df7b982d8c3635cf2678c829c4 100644
--- a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/OptionalParentsSerdeTest.java
+++ b/uc2-application/src/test/java/spesb/uc2/streamprocessing/OptionalParentsSerdeTest.java
@@ -1,9 +1,9 @@
-package titan.ccp.aggregation.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import java.util.Optional;
 import java.util.Set;
 import org.junit.Test;
-import uc2.streamprocessing.OptionalParentsSerde;
+import spesb.uc2.streamprocessing.OptionalParentsSerde;
 
 public class OptionalParentsSerdeTest {
 
diff --git a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/ParentsSerdeTest.java b/uc2-application/src/test/java/spesb/uc2/streamprocessing/ParentsSerdeTest.java
similarity index 82%
rename from uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/ParentsSerdeTest.java
rename to uc2-application/src/test/java/spesb/uc2/streamprocessing/ParentsSerdeTest.java
index 715a14f47ee1d8243070344ea40edba37ee595fd..7f166669bc34ea6d5482504f2d5ada4c26f64fc8 100644
--- a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/ParentsSerdeTest.java
+++ b/uc2-application/src/test/java/spesb/uc2/streamprocessing/ParentsSerdeTest.java
@@ -1,8 +1,8 @@
-package titan.ccp.aggregation.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import java.util.Set;
 import org.junit.Test;
-import uc2.streamprocessing.ParentsSerde;
+import spesb.uc2.streamprocessing.ParentsSerde;
 
 public class ParentsSerdeTest {
 
diff --git a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/SensorParentKeySerdeTest.java b/uc2-application/src/test/java/spesb/uc2/streamprocessing/SensorParentKeySerdeTest.java
similarity index 77%
rename from uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/SensorParentKeySerdeTest.java
rename to uc2-application/src/test/java/spesb/uc2/streamprocessing/SensorParentKeySerdeTest.java
index 3090c9efb7e1fa846f5dc10fae0e917802853c39..5e0495f85423c582a17aefc1fda1a7c937ce14f7 100644
--- a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/SensorParentKeySerdeTest.java
+++ b/uc2-application/src/test/java/spesb/uc2/streamprocessing/SensorParentKeySerdeTest.java
@@ -1,8 +1,8 @@
-package titan.ccp.aggregation.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import org.junit.Test;
-import uc2.streamprocessing.SensorParentKey;
-import uc2.streamprocessing.SensorParentKeySerde;
+import spesb.uc2.streamprocessing.SensorParentKey;
+import spesb.uc2.streamprocessing.SensorParentKeySerde;
 
 public class SensorParentKeySerdeTest {
 
diff --git a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/SerdeTester.java b/uc2-application/src/test/java/spesb/uc2/streamprocessing/SerdeTester.java
similarity index 93%
rename from uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/SerdeTester.java
rename to uc2-application/src/test/java/spesb/uc2/streamprocessing/SerdeTester.java
index 47c34a23f791d961dacb6ea530462c8e0ed94946..443d8b845b89a595f4280c4d0b0ae845c542b338 100644
--- a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/SerdeTester.java
+++ b/uc2-application/src/test/java/spesb/uc2/streamprocessing/SerdeTester.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import static org.junit.Assert.assertEquals;
 import java.util.function.Function;
diff --git a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/SerdeTesterFactory.java b/uc2-application/src/test/java/spesb/uc2/streamprocessing/SerdeTesterFactory.java
similarity index 93%
rename from uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/SerdeTesterFactory.java
rename to uc2-application/src/test/java/spesb/uc2/streamprocessing/SerdeTesterFactory.java
index ce45272c00d82c8600dd11485d2e2c307ada9de5..9e5549fc1ced4ff4012ae699a8e6cdf65726f9a3 100644
--- a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/SerdeTesterFactory.java
+++ b/uc2-application/src/test/java/spesb/uc2/streamprocessing/SerdeTesterFactory.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation.streamprocessing;
+package spesb.uc2.streamprocessing;
 
 import org.apache.kafka.common.serialization.Serde;
 
diff --git a/uc2-workload-generator/build.gradle b/uc2-workload-generator/build.gradle
index 833291011bb60bf84fcec323f2f0f63f9915d245..d165ab24e81b56d85f8183b41d5ec4f254be43b8 100644
--- a/uc2-workload-generator/build.gradle
+++ b/uc2-workload-generator/build.gradle
@@ -1,31 +1 @@
-apply plugin: 'application'
-apply plugin: 'eclipse'
-
-buildscript {
-  repositories {
-    maven {
-      url "https://plugins.gradle.org/m2/"
-    }
-  }
-}
-
-sourceCompatibility = "1.11"
-targetCompatibility = "1.11"
-
-dependencies {
-    compile project(':')
-    
-    compile 'org.slf4j:slf4j-simple:1.6.1'
-
-    // Use JUnit test framework
-    testCompile 'junit:junit:4.12'
-}
-
-mainClassName = "uc2.workloadGenerator.LoadGenerator"
-
-eclipse {
-    classpath {
-       downloadSources=true
-       downloadJavadoc=true
-    }
-}
\ No newline at end of file
+mainClassName = "spesb.uc2.workloadgenerator.LoadGenerator"
diff --git a/uc4-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java b/uc2-workload-generator/src/main/java/spesb/kafkasender/KafkaRecordSender.java
similarity index 99%
rename from uc4-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
rename to uc2-workload-generator/src/main/java/spesb/kafkasender/KafkaRecordSender.java
index 6c67cf722b4dce87f0bc197ba80f8f117f82198e..034201411a84d3769dbe8c02a210098c62dca881 100644
--- a/uc4-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
+++ b/uc2-workload-generator/src/main/java/spesb/kafkasender/KafkaRecordSender.java
@@ -1,4 +1,4 @@
-package kafkaSender;
+package spesb.kafkasender;
 
 import java.util.Properties;
 import java.util.function.Function;
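
Note: KafkaRecordSender moves into the shared spesb.kafkasender package here. A sketch of how the load generators in this diff use it; the constructor signature is inferred from the call sites below, and the server, topic, and value are illustrative:

import java.util.Properties;
import spesb.kafkasender.KafkaRecordSender;
import titan.ccp.models.records.ActivePowerRecord;

public class SenderExample {
  public static void main(final String[] args) {
    // Arguments mirror the load generator call sites in this diff.
    final KafkaRecordSender<ActivePowerRecord> sender = new KafkaRecordSender<>(
        "localhost:9092",        // Kafka bootstrap servers
        "input",                 // target topic
        r -> r.getIdentifier(),  // record key extractor
        r -> r.getTimestamp(),   // record timestamp extractor
        new Properties());       // extra producer properties (batch size, linger, ...)
    sender.write(new ActivePowerRecord("sensor_0", System.currentTimeMillis(), 10));
  }
}
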
diff --git a/uc3-workload-generator/src/main/java/uc3/workloadGenerator/ConfigPublisher.java b/uc2-workload-generator/src/main/java/spesb/uc2/workloadgenerator/ConfigPublisher.java
similarity index 97%
rename from uc3-workload-generator/src/main/java/uc3/workloadGenerator/ConfigPublisher.java
rename to uc2-workload-generator/src/main/java/spesb/uc2/workloadgenerator/ConfigPublisher.java
index ab36397d810c276cf6e1e134364650a64d5997d1..8cc3095fff902336273bf1145270a22044fad97e 100644
--- a/uc3-workload-generator/src/main/java/uc3/workloadGenerator/ConfigPublisher.java
+++ b/uc2-workload-generator/src/main/java/spesb/uc2/workloadgenerator/ConfigPublisher.java
@@ -1,4 +1,4 @@
-package uc3.workloadGenerator;
+package spesb.uc2.workloadgenerator;
 
 import java.util.Properties;
 import java.util.concurrent.ExecutionException;
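
Note: ConfigPublisher is reused unchanged from UC3 here. Its usage, taken directly from the load generators in this diff — publish the sensor registry once on the "configuration" topic, then release the producer (bootstrap server is illustrative):

import spesb.uc2.workloadgenerator.ConfigPublisher;
import titan.ccp.configuration.events.Event;
import titan.ccp.model.sensorregistry.MutableSensorRegistry;

public class ConfigPublisherExample {
  public static void main(final String[] args) {
    final MutableSensorRegistry registry = new MutableSensorRegistry("group_lvl_0");
    registry.getTopLevelSensor().addChildMachineSensor("sensor_0");
    // Announce the registry so downstream services learn the sensor hierarchy.
    final ConfigPublisher publisher = new ConfigPublisher("localhost:9092", "configuration");
    publisher.publish(Event.SENSOR_REGISTRY_CHANGED, registry.toJson());
    publisher.close();
  }
}
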
diff --git a/uc2-workload-generator/src/main/java/uc2/workloadGenerator/LoadGeneratorExtrem.java b/uc2-workload-generator/src/main/java/spesb/uc2/workloadgenerator/LoadGenerator.java
similarity index 50%
rename from uc2-workload-generator/src/main/java/uc2/workloadGenerator/LoadGeneratorExtrem.java
rename to uc2-workload-generator/src/main/java/spesb/uc2/workloadgenerator/LoadGenerator.java
index e13030e23d9dd945553abd9f919d0873e4b23bda..c2b05be3f525af95a8995704ab49a48343cb4f93 100644
--- a/uc2-workload-generator/src/main/java/uc2/workloadGenerator/LoadGeneratorExtrem.java
+++ b/uc2-workload-generator/src/main/java/spesb/uc2/workloadgenerator/LoadGenerator.java
@@ -1,51 +1,68 @@
-package uc2.workloadGenerator;
+package spesb.uc2.workloadgenerator;
 
 import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadMXBean;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Objects;
 import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import kafkaSender.KafkaRecordSender;
 import org.apache.kafka.clients.producer.ProducerConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import spesb.kafkasender.KafkaRecordSender;
 import titan.ccp.configuration.events.Event;
 import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
 import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.model.sensorregistry.SensorRegistry;
 import titan.ccp.models.records.ActivePowerRecord;
 
-public class LoadGeneratorExtrem {
+public class LoadGenerator {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class);
 
   public static void main(final String[] args) throws InterruptedException, IOException {
+    LOGGER.info("Start workload generator for use case UC2.");
 
     final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
-    final int numNestedGroups =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
+    final int numNestedGroups = Integer
+        .parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
     final int numSensor =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
-    final int value =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-    final boolean sendRegistry =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-    final boolean doNothing =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("DO_NOTHING"), "false"));
-    final int threads =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-    final int producers =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("PRODUCERS"), "1"));
+    final int periodMs =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
+    final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+    final boolean sendRegistry = Boolean
+        .parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
+    final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
     final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
+        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
+            "localhost:9092");
     final String kafkaInputTopic =
         Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
     final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
     final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
     final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
 
-    final SensorRegistry sensorRegistry =
-        buildSensorRegistry(hierarchy, numNestedGroups, numSensor);
+    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
+    if (hierarchy.equals("deep")) {
+      MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
+      for (int lvl = 1; lvl < numNestedGroups; lvl++) {
+        lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
+      }
+      for (int s = 0; s < numSensor; s++) {
+        lastSensor.addChildMachineSensor("sensor_" + s);
+      }
+    } else if (hierarchy.equals("full")) {
+      addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
+    } else {
+      throw new IllegalStateException("Hierarchy must be 'deep' or 'full'.");
+    }
+
+    final List<String> sensors =
+        sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
+            .collect(Collectors.toList());
 
     if (sendRegistry) {
       final ConfigPublisher configPublisher =
@@ -64,90 +81,29 @@ public class LoadGeneratorExtrem {
     kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
     kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
     kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final List<KafkaRecordSender<ActivePowerRecord>> kafkaRecordSenders = Stream
-        .<KafkaRecordSender<ActivePowerRecord>>generate(
-            () -> new KafkaRecordSender<>(
-                kafkaBootstrapServers,
-                kafkaInputTopic,
-                r -> r.getIdentifier(),
-                r -> r.getTimestamp(),
-                kafkaProperties))
-        .limit(producers)
-        .collect(Collectors.toList());
-
-    final List<String> sensors =
-        sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-            .collect(Collectors.toList());
-
-    for (int i = 0; i < threads; i++) {
-      final int threadId = i;
-      new Thread(() -> {
-        while (true) {
-          for (final String sensor : sensors) {
-            if (!doNothing) {
-              kafkaRecordSenders.get(threadId % producers).write(new ActivePowerRecord(
-                  sensor,
-                  System.currentTimeMillis(),
-                  value));
-            }
-          }
-        }
-      }).start();
+    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender =
+        new KafkaRecordSender<>(kafkaBootstrapServers,
+            kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
+
+    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
+    final Random random = new Random();
+
+    for (final String sensor : sensors) {
+      final int initialDelay = random.nextInt(periodMs);
+      executor.scheduleAtFixedRate(() -> {
+        kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
+      }, initialDelay, periodMs, TimeUnit.MILLISECONDS);
     }
 
-    while (true) {
-      printCpuUsagePerThread();
-    }
-
-    // System.out.println("Wait for termination...");
-    // Thread.sleep(30 * 24 * 60 * 60 * 1000L);
-    // System.out.println("Will terminate now");
-  }
-
-  private static void printCpuUsagePerThread() throws InterruptedException {
-    final ThreadMXBean tmxb = ManagementFactory.getThreadMXBean();
-    final List<Thread> threads = new ArrayList<>(Thread.getAllStackTraces().keySet());
-
-    final long start = System.nanoTime();
-    final long[] startCpuTimes = new long[threads.size()];
-    for (int i = 0; i < threads.size(); i++) {
-      final Thread thread = threads.get(i);
-      startCpuTimes[i] = tmxb.getThreadCpuTime(thread.getId());
-    }
+    System.out.println("Wait for termination...");
+    executor.awaitTermination(30, TimeUnit.DAYS);
+    System.out.println("Will terminate now");
 
-    Thread.sleep(5000);
-
-    for (int i = 0; i < threads.size(); i++) {
-      final Thread thread = threads.get(i);
-      final long cpuTime = tmxb.getThreadCpuTime(thread.getId()) - startCpuTimes[i];
-      final long dur = System.nanoTime() - start;
-      final double util = (double) cpuTime / dur;
-      System.out.println(
-          "Thread " + thread.getName() + ": " + String.format(java.util.Locale.US, "%.4f", util));
-    }
-  }
-
-  private static SensorRegistry buildSensorRegistry(final String hierarchy,
-      final int numNestedGroups, final int numSensor) {
-    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-    if (hierarchy.equals("deep")) {
-      MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-      for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-        lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-      }
-      for (int s = 0; s < numSensor; s++) {
-        lastSensor.addChildMachineSensor("sensor_" + s);
-      }
-    } else if (hierarchy.equals("full")) {
-      addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-    } else {
-      throw new IllegalStateException();
-    }
-    return sensorRegistry;
   }
 
   private static int addChildren(final MutableAggregatedSensor parent, final int numChildren,
-      final int lvl, final int maxLvl, int nextId) {
+      final int lvl, final int maxLvl,
+      int nextId) {
     for (int c = 0; c < numChildren; c++) {
       if (lvl == maxLvl) {
         parent.addChildMachineSensor("s_" + nextId);
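
Note: the rewritten UC2 LoadGenerator above replaces the busy-wait producer threads of LoadGeneratorExtrem with a ScheduledExecutorService that emits one record per sensor per period. A minimal, JDK-only sketch of that pattern (the printed message stands in for the Kafka write; sensor names and the 10-second cap are illustrative):

import java.util.List;
import java.util.Random;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class SchedulerSketch {
  public static void main(final String[] args) throws InterruptedException {
    final int periodMs = 1000;
    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(4);
    final Random random = new Random();
    // One periodic task per sensor; random initial delays spread the records
    // evenly over the period instead of emitting them all at once.
    for (final String sensor : List.of("s_0", "s_1", "s_2")) {
      final int initialDelay = random.nextInt(periodMs);
      executor.scheduleAtFixedRate(
          () -> System.out.println(sensor + " -> " + System.currentTimeMillis()),
          initialDelay, periodMs, TimeUnit.MILLISECONDS);
    }
    executor.awaitTermination(10, TimeUnit.SECONDS); // run for ~10 s in this sketch
  }
}
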
diff --git a/uc3-workload-generator/src/main/java/uc3/workloadGenerator/LoadGeneratorExtrem.java b/uc2-workload-generator/src/main/java/spesb/uc2/workloadgenerator/LoadGeneratorExtrem.java
similarity index 98%
rename from uc3-workload-generator/src/main/java/uc3/workloadGenerator/LoadGeneratorExtrem.java
rename to uc2-workload-generator/src/main/java/spesb/uc2/workloadgenerator/LoadGeneratorExtrem.java
index 2361cf2c04a1bc3bd05af089e6bdf72213eb6cb1..c78647edbb4f829237c25237d9edc9d11beeffc5 100644
--- a/uc3-workload-generator/src/main/java/uc3/workloadGenerator/LoadGeneratorExtrem.java
+++ b/uc2-workload-generator/src/main/java/spesb/uc2/workloadgenerator/LoadGeneratorExtrem.java
@@ -1,4 +1,4 @@
-package uc3.workloadGenerator;
+package spesb.uc2.workloadgenerator;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -9,8 +9,8 @@ import java.util.Objects;
 import java.util.Properties;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
-import kafkaSender.KafkaRecordSender;
 import org.apache.kafka.clients.producer.ProducerConfig;
+import spesb.kafkasender.KafkaRecordSender;
 import titan.ccp.configuration.events.Event;
 import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
 import titan.ccp.model.sensorregistry.MutableSensorRegistry;
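
Note: LoadGeneratorExtrem stays as the benchmarking variant; both generators build their sensor hierarchy through the same registry API. A compact sketch of the "deep" layout — a single chain of nested groups with all machine sensors at the bottom, while "full" instead builds a balanced tree via addChildren — using only calls visible in this diff (the depths and counts are illustrative):

import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
import titan.ccp.model.sensorregistry.MutableSensorRegistry;

public class RegistrySketch {
  public static void main(final String[] args) {
    final MutableSensorRegistry registry = new MutableSensorRegistry("group_lvl_0");
    // Chain three group levels below the top-level sensor.
    MutableAggregatedSensor group = registry.getTopLevelSensor();
    for (int lvl = 1; lvl < 3; lvl++) {
      group = group.addChildAggregatedSensor("group_lvl_" + lvl);
    }
    // Attach the machine sensors to the deepest group.
    for (int s = 0; s < 4; s++) {
      group.addChildMachineSensor("sensor_" + s);
    }
    System.out.println(registry.toJson());
  }
}
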
diff --git a/uc2-workload-generator/src/main/java/uc2/workloadGenerator/ConfigPublisher.java b/uc2-workload-generator/src/main/java/uc2/workloadGenerator/ConfigPublisher.java
deleted file mode 100644
index 56625e454b42b6620b21261e7a57969f83707dfe..0000000000000000000000000000000000000000
--- a/uc2-workload-generator/src/main/java/uc2/workloadGenerator/ConfigPublisher.java
+++ /dev/null
@@ -1,50 +0,0 @@
-package uc2.workloadGenerator;
-
-import java.util.Properties;
-import java.util.concurrent.ExecutionException;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.configuration.events.EventSerde;
-
-public class ConfigPublisher {
-
-  private final String topic;
-
-  private final Producer<Event, String> producer;
-
-  public ConfigPublisher(final String bootstrapServers, final String topic) {
-    this(bootstrapServers, topic, new Properties());
-  }
-
-  public ConfigPublisher(final String bootstrapServers, final String topic,
-      final Properties defaultProperties) {
-    this.topic = topic;
-
-    final Properties properties = new Properties();
-    properties.putAll(defaultProperties);
-    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-    properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "134217728"); // 128 MB
-    properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "134217728"); // 128 MB
-
-    this.producer =
-        new KafkaProducer<>(properties, EventSerde.serializer(), new StringSerializer());
-  }
-
-  public void publish(final Event event, final String value) {
-    final ProducerRecord<Event, String> record = new ProducerRecord<>(this.topic, event, value);
-    try {
-      this.producer.send(record).get();
-    } catch (InterruptedException | ExecutionException e) {
-      throw new IllegalArgumentException(e);
-    }
-  }
-
-  public void close() {
-    this.producer.close();
-  }
-
-}
diff --git a/uc2-workload-generator/src/main/java/uc2/workloadGenerator/LoadGenerator.java b/uc2-workload-generator/src/main/java/uc2/workloadGenerator/LoadGenerator.java
deleted file mode 100644
index c818aadd5e8c61088297f200b134e93e5b765a06..0000000000000000000000000000000000000000
--- a/uc2-workload-generator/src/main/java/uc2/workloadGenerator/LoadGenerator.java
+++ /dev/null
@@ -1,107 +0,0 @@
-package uc2.workloadGenerator;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-import kafkaSender.KafkaRecordSender;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-
-public class LoadGenerator {
-
-	public static void main(final String[] args) throws InterruptedException, IOException {
-
-		final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
-		final int numNestedGroups = Integer
-				.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
-		final int numSensor = Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
-		final int periodMs = Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-		final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-		final boolean sendRegistry = Boolean
-				.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-		final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-		final String kafkaBootstrapServers = Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
-				"localhost:9092");
-		final String kafkaInputTopic = Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-		final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
-		final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
-		final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
-
-		final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-		if (hierarchy.equals("deep")) {
-			MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-			for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-				lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-			}
-			for (int s = 0; s < numSensor; s++) {
-				lastSensor.addChildMachineSensor("sensor_" + s);
-			}
-		} else if (hierarchy.equals("full")) {
-			addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-		} else {
-			throw new IllegalStateException();
-		}
-
-		final List<String> sensors = sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-				.collect(Collectors.toList());
-
-		if (sendRegistry) {
-			final ConfigPublisher configPublisher = new ConfigPublisher(kafkaBootstrapServers, "configuration");
-			configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-			configPublisher.close();
-			System.out.println("Configuration sent.");
-
-			System.out.println("Now wait 30 seconds");
-			Thread.sleep(30_000);
-			System.out.println("And woke up again :)");
-		}
-
-		final Properties kafkaProperties = new Properties();
-		// kafkaProperties.put("acks", this.acknowledges);
-		kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
-		kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
-		kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-		final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(kafkaBootstrapServers,
-				kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
-
-		final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-		final Random random = new Random();
-
-		for (final String sensor : sensors) {
-			final int initialDelay = random.nextInt(periodMs);
-			executor.scheduleAtFixedRate(() -> {
-				kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
-			}, initialDelay, periodMs, TimeUnit.MILLISECONDS);
-		}
-
-		System.out.println("Wait for termination...");
-		executor.awaitTermination(30, TimeUnit.DAYS);
-		System.out.println("Will terminate now");
-
-	}
-
-	private static int addChildren(final MutableAggregatedSensor parent, final int numChildren, final int lvl,
-			final int maxLvl, int nextId) {
-		for (int c = 0; c < numChildren; c++) {
-			if (lvl == maxLvl) {
-				parent.addChildMachineSensor("s_" + nextId);
-				nextId++;
-			} else {
-				final MutableAggregatedSensor newParent = parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
-				nextId++;
-				nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
-			}
-		}
-		return nextId;
-	}
-
-}
diff --git a/uc3-application/build.gradle b/uc3-application/build.gradle
index 01f47ffbcde8beb2e02f40ff498516742b3ed49f..89d122ba69512548a011505c71f636c0bd3b0b47 100644
--- a/uc3-application/build.gradle
+++ b/uc3-application/build.gradle
@@ -1,23 +1,5 @@
-apply plugin: 'application'
-apply plugin: 'eclipse'
-
-buildscript {
-  repositories {
-    maven {
-      url "https://plugins.gradle.org/m2/"
-    }
-  }
-}
-
-sourceCompatibility = "1.11"
-targetCompatibility = "1.11"
-
-allprojects { 
+allprojects {
 	repositories {
-	    jcenter()
-	    maven {
-	    	url "https://oss.sonatype.org/content/repositories/snapshots/"
-	    }
     	maven {
     		url 'https://packages.confluent.io/maven/'
     	}
@@ -25,20 +7,7 @@ allprojects {
 }
 
 dependencies {
-    compile project(':')
-    
     compile('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT')
-    compile 'org.slf4j:slf4j-simple:1.6.1'
-
-    // Use JUnit test framework
-    testCompile 'junit:junit:4.12'
 }
 
-mainClassName = "uc3.application.HistoryService"
-
-eclipse {
-    classpath {
-       downloadSources=true
-       downloadJavadoc=true
-    }
-}
\ No newline at end of file
+mainClassName = "spesb.uc3.application.HistoryService"
diff --git a/uc3-application/src/main/java/uc3/application/ConfigurationKeys.java b/uc3-application/src/main/java/spesb/uc3/application/ConfigurationKeys.java
similarity index 95%
rename from uc3-application/src/main/java/uc3/application/ConfigurationKeys.java
rename to uc3-application/src/main/java/spesb/uc3/application/ConfigurationKeys.java
index 8849279792e5192c003fa6d82257e3a162cbaac0..df51385a6f61cf25028f1d45552fa9687f40dc15 100644
--- a/uc3-application/src/main/java/uc3/application/ConfigurationKeys.java
+++ b/uc3-application/src/main/java/spesb/uc3/application/ConfigurationKeys.java
@@ -1,4 +1,4 @@
-package uc3.application;
+package spesb.uc3.application;
 
 /**
  * Keys to access configuration parameters.
diff --git a/uc3-application/src/main/java/uc3/application/HistoryService.java b/uc3-application/src/main/java/spesb/uc3/application/HistoryService.java
similarity index 95%
rename from uc3-application/src/main/java/uc3/application/HistoryService.java
rename to uc3-application/src/main/java/spesb/uc3/application/HistoryService.java
index 932b9559c69309100c79c4cbc093bd91a503e5cd..2b6c40e51a09e179778209d0626da6f6718bc07a 100644
--- a/uc3-application/src/main/java/uc3/application/HistoryService.java
+++ b/uc3-application/src/main/java/spesb/uc3/application/HistoryService.java
@@ -1,12 +1,12 @@
-package uc3.application;
+package spesb.uc3.application;
 
 import java.time.Duration;
 import java.util.Objects;
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
+import spesb.uc3.streamprocessing.KafkaStreamsBuilder;
 import titan.ccp.common.configuration.Configurations;
-import uc3.streamprocessing.KafkaStreamsBuilder;
 
 /**
  * A microservice that manages the history and, therefore, stores and aggregates incoming
diff --git a/uc3-application/src/main/java/spesb/uc3/streamprocessing/KafkaStreamsBuilder.java b/uc3-application/src/main/java/spesb/uc3/streamprocessing/KafkaStreamsBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..28382bedd3b02ceb2c48925212087c28ed371aad
--- /dev/null
+++ b/uc3-application/src/main/java/spesb/uc3/streamprocessing/KafkaStreamsBuilder.java
@@ -0,0 +1,105 @@
+package spesb.uc3.streamprocessing;
+
+import java.time.Duration;
+import java.util.Objects;
+import java.util.Properties;
+import org.apache.kafka.streams.KafkaStreams;
+import org.apache.kafka.streams.StreamsConfig;
+import titan.ccp.common.kafka.streams.PropertiesBuilder;
+
+/**
+ * Builder for the Kafka Streams configuration.
+ */
+public class KafkaStreamsBuilder {
+
+  private static final String APPLICATION_NAME = "titan-ccp-history";
+  private static final String APPLICATION_VERSION = "0.0.1";
+
+  // private static final Logger LOGGER =
+  // LoggerFactory.getLogger(KafkaStreamsBuilder.class);
+
+  private String bootstrapServers; // NOPMD
+  private String inputTopic; // NOPMD
+  private String outputTopic; // NOPMD
+  private Duration windowDuration; // NOPMD
+  private int numThreads = -1; // NOPMD
+  private int commitIntervalMs = -1; // NOPMD
+  private int cacheMaxBytesBuff = -1; // NOPMD
+
+  public KafkaStreamsBuilder inputTopic(final String inputTopic) {
+    this.inputTopic = inputTopic;
+    return this;
+  }
+
+  public KafkaStreamsBuilder bootstrapServers(final String bootstrapServers) {
+    this.bootstrapServers = bootstrapServers;
+    return this;
+  }
+
+  public KafkaStreamsBuilder outputTopic(final String outputTopic) {
+    this.outputTopic = outputTopic;
+    return this;
+  }
+
+  public KafkaStreamsBuilder windowDuration(final Duration windowDuration) {
+    this.windowDuration = windowDuration;
+    return this;
+  }
+
+  /**
+   * Sets the Kafka Streams property for the number of threads (num.stream.threads). Can be set
+   * to -1 to use the default.
+   */
+  public KafkaStreamsBuilder numThreads(final int numThreads) {
+    if (numThreads < -1 || numThreads == 0) {
+      throw new IllegalArgumentException("Number of threads must be greater 0 or -1.");
+    }
+    this.numThreads = numThreads;
+    return this;
+  }
+
+  /**
+   * Sets the Kafka Streams property for the frequency with which to save the position (offsets in
+   * source topics) of tasks (commit.interval.ms). Must be zero to process all records, for
+   * example, when processing records in bulk. Can be -1 to use the default.
+   */
+  public KafkaStreamsBuilder commitIntervalMs(final int commitIntervalMs) {
+    if (commitIntervalMs < -1) {
+      throw new IllegalArgumentException("Commit interval must be greater or equal -1.");
+    }
+    this.commitIntervalMs = commitIntervalMs;
+    return this;
+  }
+
+  /**
+   * Sets the Kafka Streams property for the maximum number of memory bytes used for record caches
+   * across all threads (cache.max.bytes.buffering). Must be zero to process all records, for
+   * example, when processing records in bulk. Can be -1 to use the default.
+   */
+  public KafkaStreamsBuilder cacheMaxBytesBuffering(final int cacheMaxBytesBuffering) {
+    if (cacheMaxBytesBuffering < -1) {
+      throw new IllegalArgumentException("Cache max bytes buffering must be greater or equal -1.");
+    }
+    this.cacheMaxBytesBuff = cacheMaxBytesBuffering;
+    return this;
+  }
+
+  /**
+   * Builds the {@link KafkaStreams} instance.
+   */
+  public KafkaStreams build() {
+    Objects.requireNonNull(this.inputTopic, "Input topic has not been set.");
+    // TODO log parameters
+    final TopologyBuilder topologyBuilder = new TopologyBuilder(this.inputTopic, this.outputTopic,
+        this.windowDuration);
+    final Properties properties = PropertiesBuilder.bootstrapServers(this.bootstrapServers)
+        .applicationId(APPLICATION_NAME + '-' + APPLICATION_VERSION) // TODO as parameter
+        .set(StreamsConfig.NUM_STREAM_THREADS_CONFIG, this.numThreads, p -> p > 0)
+        .set(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, this.commitIntervalMs, p -> p >= 0)
+        .set(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, this.cacheMaxBytesBuff, p -> p >= 0)
+        // .set(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, "DEBUG")
+        .build();
+    return new KafkaStreams(topologyBuilder.build(), properties);
+  }
+
+}
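
Note: a sketch of how the new builder above might be wired up in HistoryService; all builder methods come from the file itself, while the server, topic names, and window size are illustrative placeholders:

import java.time.Duration;
import org.apache.kafka.streams.KafkaStreams;
import spesb.uc3.streamprocessing.KafkaStreamsBuilder;

public class StreamsStartup {
  public static void main(final String[] args) {
    // -1 keeps the Kafka Streams defaults for threads, commit interval, and cache size.
    final KafkaStreams streams = new KafkaStreamsBuilder()
        .bootstrapServers("localhost:9092")
        .inputTopic("input")
        .outputTopic("output")
        .windowDuration(Duration.ofMinutes(1))
        .numThreads(-1)
        .commitIntervalMs(-1)
        .cacheMaxBytesBuffering(-1)
        .build();
    streams.start();
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
  }
}
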
diff --git a/uc3-application/src/main/java/uc3/streamprocessing/TopologyBuilder.java b/uc3-application/src/main/java/spesb/uc3/streamprocessing/TopologyBuilder.java
similarity index 96%
rename from uc3-application/src/main/java/uc3/streamprocessing/TopologyBuilder.java
rename to uc3-application/src/main/java/spesb/uc3/streamprocessing/TopologyBuilder.java
index fe20b3ab0347d5714061cc8c30735b3fb49d7ea7..d79451088e78e07f003dc076933f20489f594523 100644
--- a/uc3-application/src/main/java/uc3/streamprocessing/TopologyBuilder.java
+++ b/uc3-application/src/main/java/spesb/uc3/streamprocessing/TopologyBuilder.java
@@ -1,4 +1,4 @@
-package uc3.streamprocessing;
+package spesb.uc3.streamprocessing;
 
 import com.google.common.math.Stats;
 import java.time.Duration;
@@ -12,10 +12,10 @@ import org.apache.kafka.streams.kstream.Produced;
 import org.apache.kafka.streams.kstream.TimeWindows;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import spesb.uc3.streamprocessing.util.StatsFactory;
 import titan.ccp.common.kafka.GenericSerde;
 import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
 import titan.ccp.models.records.ActivePowerRecordFactory;
-import uc3.streamprocessing.util.StatsFactory;
 
 /**
  * Builds Kafka Stream Topology for the History microservice.
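
Note: a deliberately simplified stand-in for the windowed topology this builder produces. The real TopologyBuilder aggregates ActivePowerRecords into Guava Stats via the project's GenericSerde and IMonitoringRecordSerde; this sketch substitutes a plain windowed count over double values so it compiles against stock Kafka Streams (topic and window size are assumptions):

import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.TimeWindows;

public class TopologySketch {
  public static Topology build(final String inputTopic) {
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream(inputTopic, Consumed.with(Serdes.String(), Serdes.Double()))
        .groupByKey()                                    // group records per sensor id
        .windowedBy(TimeWindows.of(Duration.ofMinutes(1))) // tumbling one-minute windows
        .count()                                         // stand-in for the Stats aggregation
        .toStream()
        .foreach((windowedKey, count) -> System.out.println(windowedKey + ": " + count));
    return builder.build();
  }
}
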
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/util/StatsFactory.java b/uc3-application/src/main/java/spesb/uc3/streamprocessing/util/StatsFactory.java
similarity index 92%
rename from uc4-application/src/main/java/uc4/streamprocessing/util/StatsFactory.java
rename to uc3-application/src/main/java/spesb/uc3/streamprocessing/util/StatsFactory.java
index 60035ea7f04dc0f9f36963c6ecea2b020fc48874..964199c0083dc9d096b59227c181dba732ca72b4 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/util/StatsFactory.java
+++ b/uc3-application/src/main/java/spesb/uc3/streamprocessing/util/StatsFactory.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing.util;
+package spesb.uc3.streamprocessing.util;
 
 import com.google.common.math.Stats;
 import com.google.common.math.StatsAccumulator;
diff --git a/uc3-workload-generator/build.gradle b/uc3-workload-generator/build.gradle
index 5cb1fdfccc5677b64447b3e644e7fca47c2cd571..e27cf26d28ba0d3f85a4c2a11e4eae2b85f29e4c 100644
--- a/uc3-workload-generator/build.gradle
+++ b/uc3-workload-generator/build.gradle
@@ -1,31 +1 @@
-apply plugin: 'application'
-apply plugin: 'eclipse'
-
-buildscript {
-  repositories {
-    maven {
-      url "https://plugins.gradle.org/m2/"
-    }
-  }
-}
-
-sourceCompatibility = "1.11"
-targetCompatibility = "1.11"
-
-dependencies {
-    compile project(':')
-    
-    compile 'org.slf4j:slf4j-simple:1.6.1'
-
-    // Use JUnit test framework
-    testCompile 'junit:junit:4.12'
-}
-
-mainClassName = "uc3.workloadGenerator.LoadGenerator"
-
-eclipse {
-    classpath {
-       downloadSources=true
-       downloadJavadoc=true
-    }
-}
\ No newline at end of file
+mainClassName = "spesb.uc3.workloadgenerator.LoadGenerator"
diff --git a/uc2-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java b/uc3-workload-generator/src/main/java/spesb/kafkasender/KafkaRecordSender.java
similarity index 99%
rename from uc2-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
rename to uc3-workload-generator/src/main/java/spesb/kafkasender/KafkaRecordSender.java
index 6c67cf722b4dce87f0bc197ba80f8f117f82198e..034201411a84d3769dbe8c02a210098c62dca881 100644
--- a/uc2-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
+++ b/uc3-workload-generator/src/main/java/spesb/kafkasender/KafkaRecordSender.java
@@ -1,4 +1,4 @@
-package kafkaSender;
+package spesb.kafkasender;
 
 import java.util.Properties;
 import java.util.function.Function;
diff --git a/uc3-workload-generator/src/main/java/spesb/uc3/workloadgenerator/LoadGenerator.java b/uc3-workload-generator/src/main/java/spesb/uc3/workloadgenerator/LoadGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..9ab8a553013c7d6de1eba6a2a9676fc152f86b5d
--- /dev/null
+++ b/uc3-workload-generator/src/main/java/spesb/uc3/workloadgenerator/LoadGenerator.java
@@ -0,0 +1,92 @@
+package spesb.uc3.workloadgenerator;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import spesb.kafkasender.KafkaRecordSender;
+import titan.ccp.models.records.ActivePowerRecord;
+
+public class LoadGenerator {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class);
+
+  private static final int WL_MAX_RECORDS = 150_000;
+
+  public static void main(final String[] args) throws InterruptedException, IOException {
+    LOGGER.info("Start workload generator for use case UC3.");
+
+    final int numSensors =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10"));
+    final int instanceId = getInstanceId();
+    final int periodMs =
+        Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
+    final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+    final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
+    final String kafkaBootstrapServers =
+        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
+            "localhost:9092");
+    final String kafkaInputTopic =
+        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
+    final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
+    final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
+    final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+
+    final int idStart = instanceId * WL_MAX_RECORDS;
+    final int idEnd = Math.min((instanceId + 1) * WL_MAX_RECORDS, numSensors);
+    LOGGER.info("Generating data for sensors with IDs from {} to {} (exclusive).", idStart, idEnd);
+    final List<String> sensors = IntStream.range(idStart, idEnd)
+        .mapToObj(i -> "s_" + i)
+        .collect(Collectors.toList());
+
+    final Properties kafkaProperties = new Properties();
+    // kafkaProperties.put("acks", this.acknowledges);
+    kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
+    kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
+    kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
+    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender =
+        new KafkaRecordSender<>(kafkaBootstrapServers,
+            kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
+
+    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
+    final Random random = new Random();
+
+    LOGGER.info("Start setting up sensors.");
+    for (final String sensor : sensors) {
+      final int initialDelay = random.nextInt(periodMs);
+      executor.scheduleAtFixedRate(() -> {
+        kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
+      }, initialDelay, periodMs, TimeUnit.MILLISECONDS);
+    }
+    LOGGER.info("Finished setting up sensors.");
+
+    System.out.println("Wait for termination...");
+    executor.awaitTermination(30, TimeUnit.DAYS);
+    System.out.println("Will terminate now");
+
+  }
+
+  private static int getInstanceId() {
+    final String podName = System.getenv("POD_NAME");
+    if (podName == null) {
+      return 0;
+    } else {
+      return Pattern.compile("-")
+          .splitAsStream(podName)
+          .reduce((p, x) -> x)
+          .map(Integer::parseInt)
+          .orElse(0);
+    }
+  }
+
+}
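
Note: the UC3 generator above shards the sensor ID space across replicas: each instance derives its ordinal from the trailing segment of its pod name and generates at most WL_MAX_RECORDS sensors. A JDK-only sketch of that arithmetic (the pod name and sensor count are illustrative; POD_NAME would normally be injected by Kubernetes):

import java.util.regex.Pattern;

public class InstanceIdSketch {
  public static void main(final String[] args) {
    // StatefulSet-style pod names end in an ordinal, e.g. "titan-load-generator-2".
    final String podName = "titan-load-generator-2"; // normally read from POD_NAME
    final int instanceId = Pattern.compile("-")
        .splitAsStream(podName)
        .reduce((first, second) -> second) // keep the last segment
        .map(Integer::parseInt)
        .orElse(0);
    final int wlMaxRecords = 150_000;
    final int numSensors = 400_000;
    // Instance 2 covers the half-open ID range [300000, 400000).
    final int idStart = instanceId * wlMaxRecords;
    final int idEnd = Math.min((instanceId + 1) * wlMaxRecords, numSensors);
    System.out.println("Instance " + instanceId + " generates [" + idStart + ", " + idEnd + ")");
  }
}
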
diff --git a/uc3-workload-generator/src/main/java/uc3/workloadGenerator/LoadGenerator.java b/uc3-workload-generator/src/main/java/uc3/workloadGenerator/LoadGenerator.java
deleted file mode 100644
index 35defc90a06f8c6a834c54fdd69388106b5c3ceb..0000000000000000000000000000000000000000
--- a/uc3-workload-generator/src/main/java/uc3/workloadGenerator/LoadGenerator.java
+++ /dev/null
@@ -1,87 +0,0 @@
-package uc3.workloadGenerator;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-import kafkaSender.KafkaRecordSender;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-
-public class LoadGenerator {
-
-	public static void main(final String[] args) throws InterruptedException, IOException {
-		// uc1
-
-		final int numSensor = Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10"));
-		final int periodMs = Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-		final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-		final boolean sendRegistry = Boolean
-				.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-		final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-		final String kafkaBootstrapServers = Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
-				"localhost:9092");
-		final String kafkaInputTopic = Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-		final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
-		final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
-		final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
-
-		// create sensorRegistry
-		final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-		addChildrens(sensorRegistry.getTopLevelSensor(), numSensor, 0);
-
-		final List<String> sensors = sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-				.collect(Collectors.toList());
-
-		if (sendRegistry) {
-			final ConfigPublisher configPublisher = new ConfigPublisher(kafkaBootstrapServers, "configuration");
-			configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-			configPublisher.close();
-			System.out.println("Configuration sent.");
-
-			System.out.println("Now wait 30 seconds");
-			Thread.sleep(30_000);
-			System.out.println("And woke up again :)");
-		}
-
-		final Properties kafkaProperties = new Properties();
-		// kafkaProperties.put("acks", this.acknowledges);
-		kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
-		kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
-		kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-		final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(kafkaBootstrapServers,
-				kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
-
-		final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-		final Random random = new Random();
-
-		for (final String sensor : sensors) {
-			System.out.println("working");
-			final int initialDelay = random.nextInt(periodMs);
-			executor.scheduleAtFixedRate(() -> {
-				kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
-			}, initialDelay, periodMs, TimeUnit.MILLISECONDS);
-		}
-
-		System.out.println("Wait for termination...");
-		executor.awaitTermination(30, TimeUnit.DAYS);
-		System.out.println("Will terminate now");
-
-	}
-
-	private static void addChildrens(final MutableAggregatedSensor parent, final int numChildren, int nextId) {
-		for (int c = 0; c < numChildren; c++) {
-			parent.addChildMachineSensor("s_" + nextId);
-			nextId++;
-		}
-	}
-
-}
diff --git a/uc4-application/build.gradle b/uc4-application/build.gradle
index 009c083e1fcd3dffcbb358098e2e0e0900f98e07..c89b18b1bfd5a131e58e512e79934e498f182adb 100644
--- a/uc4-application/build.gradle
+++ b/uc4-application/build.gradle
@@ -1,23 +1,5 @@
-apply plugin: 'application'
-apply plugin: 'eclipse'
-
-buildscript {
-  repositories {
-    maven {
-      url "https://plugins.gradle.org/m2/"
-    }
-  }
-}
-
-sourceCompatibility = "1.11"
-targetCompatibility = "1.11"
-
-allprojects { 
+allprojects {
 	repositories {
-	    jcenter()
-	    maven {
-	    	url "https://oss.sonatype.org/content/repositories/snapshots/"
-	    }
     	maven {
     		url 'https://packages.confluent.io/maven/'
     	}
@@ -25,20 +7,7 @@ allprojects {
 }
 
 dependencies {
-    compile project(':')
-    
-    compile 'org.slf4j:slf4j-simple:1.6.1'
     compile('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT')
-
-    // Use JUnit test framework
-    testCompile 'junit:junit:4.12'
 }
 
-mainClassName = "uc4.application.HistoryService"
-
-eclipse {
-    classpath {
-       downloadSources=true
-       downloadJavadoc=true
-    }
-}
+mainClassName = "spesb.uc4.application.HistoryService"
diff --git a/uc4-application/src/main/java/uc4/application/ConfigurationKeys.java b/uc4-application/src/main/java/spesb/uc4/application/ConfigurationKeys.java
similarity index 96%
rename from uc4-application/src/main/java/uc4/application/ConfigurationKeys.java
rename to uc4-application/src/main/java/spesb/uc4/application/ConfigurationKeys.java
index 1ded012e70ec01ea6f3a7af12d16e6dcedaa7e1f..236601a46447f5c38b6548d2e0762bbb670747e1 100644
--- a/uc4-application/src/main/java/uc4/application/ConfigurationKeys.java
+++ b/uc4-application/src/main/java/spesb/uc4/application/ConfigurationKeys.java
@@ -1,4 +1,4 @@
-package uc4.application;
+package spesb.uc4.application;
 
 /**
  * Keys to access configuration parameters.
diff --git a/uc4-application/src/main/java/uc4/application/HistoryService.java b/uc4-application/src/main/java/spesb/uc4/application/HistoryService.java
similarity index 95%
rename from uc4-application/src/main/java/uc4/application/HistoryService.java
rename to uc4-application/src/main/java/spesb/uc4/application/HistoryService.java
index b6f9c13e018aead3dcb41ad93b88de0dc8b96743..f86f0cb7e3bc6840db52ce7bdbbac054cdd05e13 100644
--- a/uc4-application/src/main/java/uc4/application/HistoryService.java
+++ b/uc4-application/src/main/java/spesb/uc4/application/HistoryService.java
@@ -1,11 +1,11 @@
-package uc4.application;
+package spesb.uc4.application;
 
 import java.time.Duration;
 import java.util.concurrent.CompletableFuture;
 import org.apache.commons.configuration2.Configuration;
 import org.apache.kafka.streams.KafkaStreams;
+import spesb.uc4.streamprocessing.KafkaStreamsBuilder;
 import titan.ccp.common.configuration.Configurations;
-import uc4.streamprocessing.KafkaStreamsBuilder;
 
 /**
  * A microservice that manages the history and, therefore, stores and aggregates incoming
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/DayOfWeekKey.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/DayOfWeekKey.java
similarity index 94%
rename from uc4-application/src/main/java/uc4/streamprocessing/DayOfWeekKey.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/DayOfWeekKey.java
index 2b9cc8596639bd90be7c6526cd5487195355d30e..a3ae3461d055694669e4d874930d5ade9dd83658 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/DayOfWeekKey.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/DayOfWeekKey.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import java.time.DayOfWeek;
 
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/DayOfWeekKeyFactory.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/DayOfWeekKeyFactory.java
similarity index 93%
rename from uc4-application/src/main/java/uc4/streamprocessing/DayOfWeekKeyFactory.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/DayOfWeekKeyFactory.java
index 2d3f4458923e65c994aec3d315c2d3e39ba7c7b8..222785ca8a2d8db72c81929a216fc53b43d06ec0 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/DayOfWeekKeyFactory.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/DayOfWeekKeyFactory.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import java.time.DayOfWeek;
 import java.time.LocalDateTime;
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/DayOfWeekKeySerde.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/DayOfWeekKeySerde.java
similarity index 96%
rename from uc4-application/src/main/java/uc4/streamprocessing/DayOfWeekKeySerde.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/DayOfWeekKeySerde.java
index 282a9f579547de704414f54739df01a74421296e..9c246f912ffc67ff6fb8d211a99d478cb58c2898 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/DayOfWeekKeySerde.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/DayOfWeekKeySerde.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import java.time.DayOfWeek;
 import org.apache.kafka.common.serialization.Serde;
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/DayOfWeekRecordFactory.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/DayOfWeekRecordFactory.java
similarity index 95%
rename from uc4-application/src/main/java/uc4/streamprocessing/DayOfWeekRecordFactory.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/DayOfWeekRecordFactory.java
index da984eacea0ad5c048836d23f363197315b04bb2..bdfecdbc4857b4d7a630b4afa07de39618435544 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/DayOfWeekRecordFactory.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/DayOfWeekRecordFactory.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import com.google.common.math.Stats;
 import org.apache.kafka.streams.kstream.Windowed;
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/HourOfDayKey.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfDayKey.java
similarity index 93%
rename from uc4-application/src/main/java/uc4/streamprocessing/HourOfDayKey.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfDayKey.java
index 2bf346064ad14f7a3a2758ea7348e77ea15db9d7..b07a54d6f22ebbacb77e53a7733f3a16d539fde2 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/HourOfDayKey.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfDayKey.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 /**
  * Composed key of an hour of the day and a sensor id.
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/HourOfDayKeyFactory.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfDayKeyFactory.java
similarity index 92%
rename from uc4-application/src/main/java/uc4/streamprocessing/HourOfDayKeyFactory.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfDayKeyFactory.java
index 641a314e7c52fdd4856b22b5d8e40ae3f5f0895d..a13de14229dfbb5a201dc282d05a8c4f97394250 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/HourOfDayKeyFactory.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfDayKeyFactory.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import java.time.LocalDateTime;
 
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/HourOfDayKeySerde.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfDayKeySerde.java
similarity index 96%
rename from uc4-application/src/main/java/uc4/streamprocessing/HourOfDayKeySerde.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfDayKeySerde.java
index 93965b3e81ce4e8adb4450da7e270fc30866ec3c..a938813c6e1239b8af81c49ef7d83800bfef9b9d 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/HourOfDayKeySerde.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfDayKeySerde.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import org.apache.kafka.common.serialization.Serde;
 import titan.ccp.common.kafka.simpleserdes.BufferSerde;
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/HourOfDayRecordFactory.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfDayRecordFactory.java
similarity index 95%
rename from uc4-application/src/main/java/uc4/streamprocessing/HourOfDayRecordFactory.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfDayRecordFactory.java
index 15710bfa70083ab2c262f992b8819cdb9a9cccdb..25fb9193d1f343a246451ef2a5309198fc39ffde 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/HourOfDayRecordFactory.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfDayRecordFactory.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import com.google.common.math.Stats;
 import org.apache.kafka.streams.kstream.Windowed;
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/HourOfWeekKey.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfWeekKey.java
similarity index 95%
rename from uc4-application/src/main/java/uc4/streamprocessing/HourOfWeekKey.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfWeekKey.java
index b007514a86a6e337cef4eb27938733de941eb7c3..81d33f3042796ecb3c890e73a82e879ab2d0ac6e 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/HourOfWeekKey.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfWeekKey.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import java.time.DayOfWeek;
 
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/HourOfWeekKeyFactory.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfWeekKeyFactory.java
similarity index 94%
rename from uc4-application/src/main/java/uc4/streamprocessing/HourOfWeekKeyFactory.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfWeekKeyFactory.java
index 59c0441395d3181e3fe4c4958437c7e9cbdbed08..980549309ce94b2e4a4c6da0835b8adfe47bb61e 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/HourOfWeekKeyFactory.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfWeekKeyFactory.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import java.time.DayOfWeek;
 import java.time.LocalDateTime;
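
Note: the renamed key factories (DayOfWeek, HourOfDay, HourOfWeek) all derive a time attribute from a record's LocalDateTime and pair it with a sensor ID. A sketch of the underlying java.time lookups these factories presumably wrap (the sensor pairing and factory API are omitted, as they are not shown in this diff):

import java.time.DayOfWeek;
import java.time.Instant;
import java.time.LocalDateTime;
import java.time.ZoneId;

public class TimeAttributeSketch {
  public static void main(final String[] args) {
    // Convert a record timestamp (epoch millis) into the attributes the keys are built from.
    final LocalDateTime dateTime = LocalDateTime.ofInstant(
        Instant.ofEpochMilli(System.currentTimeMillis()), ZoneId.systemDefault());
    final DayOfWeek day = dateTime.getDayOfWeek(); // key attribute for DayOfWeekKey
    final int hourOfDay = dateTime.getHour();      // key attribute for HourOfDayKey
    System.out.println(day + ", hour " + hourOfDay);
  }
}
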
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/HourOfWeekKeySerde.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfWeekKeySerde.java
similarity index 96%
rename from uc4-application/src/main/java/uc4/streamprocessing/HourOfWeekKeySerde.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfWeekKeySerde.java
index 2dbe18b93d456394c124c6e202427f971a097d37..63a6a445bf46f521a220816896529a081a15bca0 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/HourOfWeekKeySerde.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfWeekKeySerde.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import java.time.DayOfWeek;
 import org.apache.kafka.common.serialization.Serde;
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/HourOfWeekRecordFactory.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfWeekRecordFactory.java
similarity index 95%
rename from uc4-application/src/main/java/uc4/streamprocessing/HourOfWeekRecordFactory.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfWeekRecordFactory.java
index 7f3b66344c634c39911ffddc27353f2b11a1b3bb..358e3d1a5acf8bfd9f4fca7c95b84bd5b13bea53 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/HourOfWeekRecordFactory.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/HourOfWeekRecordFactory.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import com.google.common.math.Stats;
 import org.apache.kafka.streams.kstream.Windowed;
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/KafkaStreamsBuilder.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/KafkaStreamsBuilder.java
similarity index 99%
rename from uc4-application/src/main/java/uc4/streamprocessing/KafkaStreamsBuilder.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/KafkaStreamsBuilder.java
index c351eac687431b87f20e4b8ab6fc90fa57558778..9cbff4f61ec5975e3dcdfc5c4e4a9f900e6707ec 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/KafkaStreamsBuilder.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/KafkaStreamsBuilder.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import java.time.Duration;
 import java.util.Objects;
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/RecordDatabaseAdapter.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/RecordDatabaseAdapter.java
similarity index 98%
rename from uc4-application/src/main/java/uc4/streamprocessing/RecordDatabaseAdapter.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/RecordDatabaseAdapter.java
index d2230abd223bc2588e00e91f6a17a5e555ed2c4d..9c286cb49206d1eb1f0efbd2e98e3b44bc9a1e22 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/RecordDatabaseAdapter.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/RecordDatabaseAdapter.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import java.util.Collection;
 import java.util.List;
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/StatsKeyFactory.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/StatsKeyFactory.java
similarity index 89%
rename from uc4-application/src/main/java/uc4/streamprocessing/StatsKeyFactory.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/StatsKeyFactory.java
index ab614e32aed415d2d74f2c89e111747fd0826a6a..7e4ac46e461c9083c7929f5dd313fea0526c3d50 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/StatsKeyFactory.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/StatsKeyFactory.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import java.time.LocalDateTime;
 
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/StatsRecordFactory.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/StatsRecordFactory.java
similarity index 95%
rename from uc4-application/src/main/java/uc4/streamprocessing/StatsRecordFactory.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/StatsRecordFactory.java
index bd63a26fd837db274a5b7a67a5f7bff89db693eb..045b512d0561c25889a0a0f8ef05663824412c60 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/StatsRecordFactory.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/StatsRecordFactory.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import com.google.common.math.Stats;
 import org.apache.avro.specific.SpecificRecord;
diff --git a/uc4-application/src/main/java/uc4/streamprocessing/TopologyBuilder.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/TopologyBuilder.java
similarity index 97%
rename from uc4-application/src/main/java/uc4/streamprocessing/TopologyBuilder.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/TopologyBuilder.java
index 4a76ed883d80449d25ace7bb243846e009e8894c..66bb460031f09f1cae77d5e93e3f130aa66f6e90 100644
--- a/uc4-application/src/main/java/uc4/streamprocessing/TopologyBuilder.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/TopologyBuilder.java
@@ -1,4 +1,4 @@
-package uc4.streamprocessing;
+package spesb.uc4.streamprocessing;
 
 import com.google.common.math.Stats;
 import java.time.Duration;
@@ -15,10 +15,10 @@ import org.apache.kafka.streams.kstream.Grouped;
 import org.apache.kafka.streams.kstream.Materialized;
 import org.apache.kafka.streams.kstream.Produced;
 import org.apache.kafka.streams.kstream.TimeWindows;
+import spesb.uc4.streamprocessing.util.StatsFactory;
 import titan.ccp.common.kafka.GenericSerde;
 import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
 import titan.ccp.models.records.ActivePowerRecordFactory;
-import uc4.streamprocessing.util.StatsFactory;
 
 /**
  * Builds Kafka Stream Topology for the History microservice.
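For orientation only (this sketch is not part of the change set): the imports in the TopologyBuilder hunk above suggest a windowed aggregation. A schematic of that style follows; topic names, serdes, the window size, and the plain Double sum standing in for the Guava Stats aggregate are all assumptions.

import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.TimeWindows;

public class TopologySketch {
  public Topology build() {
    final StreamsBuilder builder = new StreamsBuilder();
    builder
        .stream("input", Consumed.with(Serdes.String(), Serdes.Double()))
        .groupByKey()
        .windowedBy(TimeWindows.of(Duration.ofMinutes(1)))
        // TopologyBuilder aggregates Stats per window; a plain sum stands in here.
        .reduce(Double::sum, Materialized.with(Serdes.String(), Serdes.Double()));
    return builder.build();
  }
}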
diff --git a/uc3-application/src/main/java/uc3/streamprocessing/util/StatsFactory.java b/uc4-application/src/main/java/spesb/uc4/streamprocessing/util/StatsFactory.java
similarity index 92%
rename from uc3-application/src/main/java/uc3/streamprocessing/util/StatsFactory.java
rename to uc4-application/src/main/java/spesb/uc4/streamprocessing/util/StatsFactory.java
index 030a1d8aefa617eb0cadd804b09fa2f10ba5a696..39fe573445984f237d600753c8c828eb2869913b 100644
--- a/uc3-application/src/main/java/uc3/streamprocessing/util/StatsFactory.java
+++ b/uc4-application/src/main/java/spesb/uc4/streamprocessing/util/StatsFactory.java
@@ -1,4 +1,4 @@
-package uc3.streamprocessing.util;
+package spesb.uc4.streamprocessing.util;
 
 import com.google.common.math.Stats;
 import com.google.common.math.StatsAccumulator;
diff --git a/uc4-workload-generator/build.gradle b/uc4-workload-generator/build.gradle
index 5902b630740148b0cfe0a387d76d16e9e19468e1..8bbdedf4f7c41da73dd2d591b8fd56830d7060b7 100644
--- a/uc4-workload-generator/build.gradle
+++ b/uc4-workload-generator/build.gradle
@@ -1,31 +1 @@
-apply plugin: 'application'
-apply plugin: 'eclipse'
-
-buildscript {
-  repositories {
-    maven {
-      url "https://plugins.gradle.org/m2/"
-    }
-  }
-}
-
-sourceCompatibility = "1.11"
-targetCompatibility = "1.11"
-
-dependencies {
-    compile project(':')
-    
-    compile 'org.slf4j:slf4j-simple:1.6.1'
-
-    // Use JUnit test framework
-    testCompile 'junit:junit:4.12'
-}
-
-mainClassName = "uc4.workloadGenerator.LoadGenerator"
-
-eclipse {
-    classpath {
-       downloadSources=true
-       downloadJavadoc=true
-    }
-}
\ No newline at end of file
+mainClassName = "spesb.uc4.workloadgenerator.LoadGenerator"
diff --git a/uc3-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java b/uc4-workload-generator/src/main/java/spesb/kafkasender/KafkaRecordSender.java
similarity index 99%
rename from uc3-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
rename to uc4-workload-generator/src/main/java/spesb/kafkasender/KafkaRecordSender.java
index 6c67cf722b4dce87f0bc197ba80f8f117f82198e..034201411a84d3769dbe8c02a210098c62dca881 100644
--- a/uc3-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
+++ b/uc4-workload-generator/src/main/java/spesb/kafkasender/KafkaRecordSender.java
@@ -1,4 +1,4 @@
-package kafkaSender;
+package spesb.kafkasender;
 
 import java.util.Properties;
 import java.util.function.Function;
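Since the sender now lives in spesb.kafkasender, here is a minimal usage sketch (illustrative, not part of this diff), based on the five-argument constructor and write(...) call that the removed LoadGeneratorExtrem below invokes; servers, topic, and record values are placeholders.

import java.util.Properties;
import spesb.kafkasender.KafkaRecordSender;
import titan.ccp.models.records.ActivePowerRecord;

public class SenderSketch {
  public static void main(final String[] args) {
    // Key and timestamp are extracted from each record via the two functions.
    final KafkaRecordSender<ActivePowerRecord> sender = new KafkaRecordSender<>(
        "localhost:9092",        // Kafka bootstrap servers (placeholder)
        "input",                 // target topic (placeholder)
        r -> r.getIdentifier(),  // record key extractor
        r -> r.getTimestamp(),   // record timestamp extractor
        new Properties());       // additional producer properties
    sender.write(new ActivePowerRecord("sensor_0", System.currentTimeMillis(), 10));
  }
}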
diff --git a/uc4-workload-generator/src/main/java/uc4/workloadGenerator/LoadGenerator.java b/uc4-workload-generator/src/main/java/spesb/uc4/workloadgenerator/LoadGenerator.java
similarity index 90%
rename from uc4-workload-generator/src/main/java/uc4/workloadGenerator/LoadGenerator.java
rename to uc4-workload-generator/src/main/java/spesb/uc4/workloadgenerator/LoadGenerator.java
index b7685a0815a9c6fe1d39facedcf06f6887063312..bcf4f6d2cc20485f1e361a9fb0e638726f5ed79a 100644
--- a/uc4-workload-generator/src/main/java/uc4/workloadGenerator/LoadGenerator.java
+++ b/uc4-workload-generator/src/main/java/spesb/uc4/workloadgenerator/LoadGenerator.java
@@ -1,4 +1,4 @@
-package uc4.workloadGenerator;
+package spesb.uc4.workloadgenerator;
 
 import java.io.IOException;
 import java.util.List;
@@ -10,14 +10,19 @@ import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
-import kafkaSender.KafkaRecordSender;
 import org.apache.kafka.clients.producer.ProducerConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import spesb.kafkasender.KafkaRecordSender;
 import titan.ccp.models.records.ActivePowerRecord;
 
 public class LoadGenerator {
 
+  private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class);
+
   public static void main(final String[] args) throws InterruptedException, IOException {
     // uc4
+    LOGGER.info("Starting workload generator for use case UC4.");
 
     final int numSensor =
         Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10"));
diff --git a/uc4-workload-generator/src/main/java/uc4/workloadGenerator/ConfigPublisher.java b/uc4-workload-generator/src/main/java/uc4/workloadGenerator/ConfigPublisher.java
deleted file mode 100644
index b126668818780caca1ea7c3c63b2203813130e9b..0000000000000000000000000000000000000000
--- a/uc4-workload-generator/src/main/java/uc4/workloadGenerator/ConfigPublisher.java
+++ /dev/null
@@ -1,50 +0,0 @@
-package uc4.workloadGenerator;
-
-import java.util.Properties;
-import java.util.concurrent.ExecutionException;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.configuration.events.EventSerde;
-
-public class ConfigPublisher {
-
-  private final String topic;
-
-  private final Producer<Event, String> producer;
-
-  public ConfigPublisher(final String bootstrapServers, final String topic) {
-    this(bootstrapServers, topic, new Properties());
-  }
-
-  public ConfigPublisher(final String bootstrapServers, final String topic,
-      final Properties defaultProperties) {
-    this.topic = topic;
-
-    final Properties properties = new Properties();
-    properties.putAll(defaultProperties);
-    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-    properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "134217728"); // 128 MB
-    properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "134217728"); // 128 MB
-
-    this.producer =
-        new KafkaProducer<>(properties, EventSerde.serializer(), new StringSerializer());
-  }
-
-  public void publish(final Event event, final String value) {
-    final ProducerRecord<Event, String> record = new ProducerRecord<>(this.topic, event, value);
-    try {
-      this.producer.send(record).get();
-    } catch (InterruptedException | ExecutionException e) {
-      throw new IllegalArgumentException(e);
-    }
-  }
-
-  public void close() {
-    this.producer.close();
-  }
-
-}
diff --git a/uc4-workload-generator/src/main/java/uc4/workloadGenerator/LoadGeneratorExtrem.java b/uc4-workload-generator/src/main/java/uc4/workloadGenerator/LoadGeneratorExtrem.java
deleted file mode 100644
index a864a0f333d9097eece8f4e93440e377500cef84..0000000000000000000000000000000000000000
--- a/uc4-workload-generator/src/main/java/uc4/workloadGenerator/LoadGeneratorExtrem.java
+++ /dev/null
@@ -1,165 +0,0 @@
-package uc4.workloadGenerator;
-
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadMXBean;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import kafkaSender.KafkaRecordSender;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.model.sensorregistry.SensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-
-public class LoadGeneratorExtrem {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
-    final int numNestedGroups =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
-    final int numSensor =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
-    final int value =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-    final boolean sendRegistry =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-    final boolean doNothing =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("DO_NOTHING"), "false"));
-    final int threads =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-    final int producers =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("PRODUCERS"), "1"));
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
-    final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
-    final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
-
-    final SensorRegistry sensorRegistry =
-        buildSensorRegistry(hierarchy, numNestedGroups, numSensor);
-
-    if (sendRegistry) {
-      final ConfigPublisher configPublisher =
-          new ConfigPublisher(kafkaBootstrapServers, "configuration");
-      configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-      configPublisher.close();
-      System.out.println("Configuration sent.");
-
-      System.out.println("Now wait 30 seconds");
-      Thread.sleep(30_000);
-      System.out.println("And woke up again :)");
-    }
-
-    final Properties kafkaProperties = new Properties();
-    // kafkaProperties.put("acks", this.acknowledges);
-    kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
-    kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
-    kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final List<KafkaRecordSender<ActivePowerRecord>> kafkaRecordSenders = Stream
-        .<KafkaRecordSender<ActivePowerRecord>>generate(
-            () -> new KafkaRecordSender<>(
-                kafkaBootstrapServers,
-                kafkaInputTopic,
-                r -> r.getIdentifier(),
-                r -> r.getTimestamp(),
-                kafkaProperties))
-        .limit(producers)
-        .collect(Collectors.toList());
-
-    final List<String> sensors =
-        sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-            .collect(Collectors.toList());
-
-    for (int i = 0; i < threads; i++) {
-      final int threadId = i;
-      new Thread(() -> {
-        while (true) {
-          for (final String sensor : sensors) {
-            if (!doNothing) {
-              kafkaRecordSenders.get(threadId % producers).write(new ActivePowerRecord(
-                  sensor,
-                  System.currentTimeMillis(),
-                  value));
-            }
-          }
-        }
-      }).start();
-    }
-
-    while (true) {
-      printCpuUsagePerThread();
-    }
-
-    // System.out.println("Wait for termination...");
-    // Thread.sleep(30 * 24 * 60 * 60 * 1000L);
-    // System.out.println("Will terminate now");
-  }
-
-  private static void printCpuUsagePerThread() throws InterruptedException {
-    final ThreadMXBean tmxb = ManagementFactory.getThreadMXBean();
-    final List<Thread> threads = new ArrayList<>(Thread.getAllStackTraces().keySet());
-
-    final long start = System.nanoTime();
-    final long[] startCpuTimes = new long[threads.size()];
-    for (int i = 0; i < threads.size(); i++) {
-      final Thread thread = threads.get(i);
-      startCpuTimes[i] = tmxb.getThreadCpuTime(thread.getId());
-    }
-
-    Thread.sleep(5000);
-
-    for (int i = 0; i < threads.size(); i++) {
-      final Thread thread = threads.get(i);
-      final long cpuTime = tmxb.getThreadCpuTime(thread.getId()) - startCpuTimes[i];
-      final long dur = System.nanoTime() - start;
-      final double util = (double) cpuTime / dur;
-      System.out.println(
-          "Thread " + thread.getName() + ": " + String.format(java.util.Locale.US, "%.4f", util));
-    }
-  }
-
-  private static SensorRegistry buildSensorRegistry(final String hierarchy,
-      final int numNestedGroups, final int numSensor) {
-    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-    if (hierarchy.equals("deep")) {
-      MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-      for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-        lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-      }
-      for (int s = 0; s < numSensor; s++) {
-        lastSensor.addChildMachineSensor("sensor_" + s);
-      }
-    } else if (hierarchy.equals("full")) {
-      addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-    } else {
-      throw new IllegalStateException();
-    }
-    return sensorRegistry;
-  }
-
-  private static int addChildren(final MutableAggregatedSensor parent, final int numChildren,
-      final int lvl, final int maxLvl, int nextId) {
-    for (int c = 0; c < numChildren; c++) {
-      if (lvl == maxLvl) {
-        parent.addChildMachineSensor("s_" + nextId);
-        nextId++;
-      } else {
-        final MutableAggregatedSensor newParent =
-            parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
-        nextId++;
-        nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
-      }
-    }
-    return nextId;
-  }
-
-}
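For reference, the sensor hierarchies the removed LoadGeneratorExtrem built follow directly from buildSensorRegistry and addChildren above: "deep" yields a single chain of numNestedGroups groups with numSensor machine sensors at the bottom, while "full" yields a complete tree with numSensor children per node and therefore numSensor^numNestedGroups machine sensors. A small worked check (values are illustrative):

public class HierarchySizes {
  public static void main(final String[] args) {
    final int numSensor = 4;       // illustrative
    final int numNestedGroups = 3; // illustrative
    // "full": every node has numSensor children down to depth numNestedGroups.
    final long fullLeaves = (long) Math.pow(numSensor, numNestedGroups); // 4^3 = 64
    // "deep": one chain of groups with numSensor leaves at the bottom.
    final int deepLeaves = numSensor; // 4
    System.out.println(fullLeaves + " machine sensors (full) vs. " + deepLeaves + " (deep)");
  }
}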