diff --git a/Deployment/docker-compose/uc1-docker-compose/docker-compose.yml b/Deployment/docker-compose/uc1-docker-compose/docker-compose.yml
new file mode 100755
index 0000000000000000000000000000000000000000..ba288cb83cf649030577e6331fee49f46316ee52
--- /dev/null
+++ b/Deployment/docker-compose/uc1-docker-compose/docker-compose.yml
@@ -0,0 +1,27 @@
+version: '2'
+services:
+  zookeeper:
+    image: wurstmeister/zookeeper
+    ports:
+      - "2181:2181"
+  kafka:
+    image: wurstmeister/kafka
+    ports:
+      - "9092:9092"
+    expose:
+      - "9092"
+    environment:
+      KAFKA_ADVERTISED_HOST_NAME: kafka # replace with the Docker host IP (e.g. 172.17.0.1) if Kafka must be reachable from outside the Compose network
+      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
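+      # each entry follows the wurstmeister/kafka convention: <topic>:<partitions>:<replication factor>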
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+  uc-app:
+    image: benediktwetzel/uc1-app:latest
+    environment:
+      KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+  uc-wg:
+    image: benediktwetzel/uc1-wg:latest
+    environment:
+      KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      NUM_SENSORS: 1
diff --git a/Deployment/docker-compose/uc2-docker-compose/docker-compose.yml b/Deployment/docker-compose/uc2-docker-compose/docker-compose.yml
new file mode 100755
index 0000000000000000000000000000000000000000..20a7a73c99c102fe90fa3d4eaa9935dba5298a94
--- /dev/null
+++ b/Deployment/docker-compose/uc2-docker-compose/docker-compose.yml
@@ -0,0 +1,26 @@
+version: '2'
+services:
+  zookeeper:
+    image: wurstmeister/zookeeper
+    ports:
+      - "2181:2181"
+  kafka:
+    image: wurstmeister/kafka
+    ports:
+      - "9092:9092"
+    expose:
+      - "9092"
+    environment:
+      KAFKA_ADVERTISED_HOST_NAME: kafka # replace with the Docker host IP (e.g. 172.17.0.1) if Kafka must be reachable from outside the Compose network
+      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+  uc-app:
+    image: benediktwetzel/uc2-app:latest
+    environment:
+      KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+  uc-wg:
+    image: benediktwetzel/uc2-wg:latest
+    environment:
+      KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      NUM_SENSORS: 1
\ No newline at end of file
diff --git a/Deployment/docker-compose/uc3-docker-compose/docker-compose.yml b/Deployment/docker-compose/uc3-docker-compose/docker-compose.yml
new file mode 100755
index 0000000000000000000000000000000000000000..2cb0d883acc38e0d24434faf4e7af82ff3c42a81
--- /dev/null
+++ b/Deployment/docker-compose/uc3-docker-compose/docker-compose.yml
@@ -0,0 +1,27 @@
+version: '2'
+services:
+  zookeeper:
+    image: wurstmeister/zookeeper
+    ports:
+      - "2181:2181"
+  kafka:
+    image: wurstmeister/kafka
+    ports:
+      - "9092:9092"
+    expose:
+      - "9092"
+    environment:
+      KAFKA_ADVERTISED_HOST_NAME: kafka # replace with the Docker host IP (e.g. 172.17.0.1) if Kafka must be reachable from outside the Compose network
+      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+  uc-app:
+    image: benediktwetzel/uc3-app:latest
+    environment:
+      KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      KAFKA_WINDOW_DURATION_MINUTES: 60
+  uc-wg:
+    image: benediktwetzel/uc3-wg:latest
+    environment:
+      KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+      NUM_SENSORS: 1
\ No newline at end of file
diff --git a/Deployment/docker-compose/uc4-docker-compose/docker-compose.yml b/Deployment/docker-compose/uc4-docker-compose/docker-compose.yml
new file mode 100755
index 0000000000000000000000000000000000000000..3347cfa9d4ddc38a7c867823abc3e7cb9d5fb319
--- /dev/null
+++ b/Deployment/docker-compose/uc4-docker-compose/docker-compose.yml
@@ -0,0 +1,25 @@
+version: '2'
+services:
+  zookeeper:
+    image: wurstmeister/zookeeper
+    ports:
+      - "2181:2181"
+  kafka:
+    image: wurstmeister/kafka
+    ports:
+      - "9092:9092"
+    expose:
+      - "9092"
+    environment:
+      KAFKA_ADVERTISED_HOST_NAME: kafka # replace with the Docker host IP (e.g. 172.17.0.1) if Kafka must be reachable from outside the Compose network
+      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
+      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
+      KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
+  uc-app:
+    image: benediktwetzel/uc2-app:latest #TODO
+    environment:
+      KAFKA_BOOTSTRAP_SERVERS: kafka:9092
+  uc-wg:
+    image: benediktwetzel/uc2-wg:latest #TODO
+    environment:
+      KAFKA_BOOTSTRAP_SERVERS: kafka:9092
diff --git a/uc1-application/Dockerfile b/uc1-application/Dockerfile
index 9b17de3af09feb1af8d14fd277d88c8c2797142e..09c36f42afe730a2fc6ba59bbc2082aa8b715f68 100644
--- a/uc1-application/Dockerfile
+++ b/uc1-application/Dockerfile
@@ -1,6 +1,8 @@
 FROM openjdk:11-slim
 
-ADD build/distributions/exp-bigdata19-bridge.tar /
+ADD build/distributions/uc1-application.tar /
 
-CMD export JAVA_OPTS=-Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL \
-    && /exp-bigdata19-bridge/bin/exp-bigdata19-bridge
\ No newline at end of file
+
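+# shell-form CMD so that $JAVA_OPTS and $LOG_LEVEL are expanded when the container starts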
+CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
+     /uc1-application/bin/uc1-application
\ No newline at end of file
diff --git a/uc1-application/build.gradle b/uc1-application/build.gradle
index 12e597b37f775a7ad48a7e6009ed075213e1712b..3fe8803745e42682cf43d068779e63183d62c792 100644
--- a/uc1-application/build.gradle
+++ b/uc1-application/build.gradle
@@ -21,7 +21,7 @@ dependencies {
     testCompile 'junit:junit:4.12'
 }
 
-mainClassName = "titan.ccp.kiekerbridge.expbigdata19.ExperimentorBigData"
+mainClassName = "uc1.application.HistoryService"
 
 eclipse {
     classpath {
diff --git a/uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java b/uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java
deleted file mode 100644
index a50bbd942fccf5f8899414fe8cb7b82ad6953f87..0000000000000000000000000000000000000000
--- a/uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import java.io.IOException;
-import java.util.Objects;
-
-public class ExperimentorBigData {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String modus = Objects.requireNonNullElse(System.getenv("MODUS"), "LoadCounter");
-
-    if (modus.equals("LoadGenerator")) {
-      LoadGenerator.main(args);
-    } else if (modus.equals("LoadGeneratorExtrem")) {
-      LoadGeneratorExtrem.main(args);
-    } else if (modus.equals("LoadCounter")) {
-      LoadCounter.main(args);
-    }
-
-  }
-}
diff --git a/uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java b/uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java
deleted file mode 100644
index 798f3014446605afab2cf20f3232896baab02802..0000000000000000000000000000000000000000
--- a/uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import com.google.common.math.StatsAccumulator;
-import java.time.Duration;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.serialization.Deserializer;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-import titan.ccp.models.records.AggregatedActivePowerRecord;
-import titan.ccp.models.records.AggregatedActivePowerRecordFactory;
-
-public class LoadCounter {
-
-  public static void main(final String[] args) throws InterruptedException {
-
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaOutputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_OUTPUT_TOPIC"), "output");
-
-    final Properties props = new Properties();
-    props.setProperty("bootstrap.servers", kafkaBootstrapServers);
-    props.setProperty("group.id", "load-counter");
-    props.setProperty("enable.auto.commit", "true");
-    props.setProperty("auto.commit.interval.ms", "1000");
-    props.setProperty("max.poll.records", "1000000");
-    props.setProperty("max.partition.fetch.bytes", "134217728"); // 128 MB
-    props.setProperty("key.deserializer",
-        "org.apache.kafka.common.serialization.StringDeserializer");
-    props.setProperty("value.deserializer",
-        "org.apache.kafka.common.serialization.ByteArrayDeserializer");
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
-    final Deserializer<AggregatedActivePowerRecord> deserializer =
-        IMonitoringRecordSerde.deserializer(new AggregatedActivePowerRecordFactory());
-
-    final KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props);
-    consumer.subscribe(List.of(kafkaInputTopic, kafkaOutputTopic));
-
-    executor.scheduleAtFixedRate(
-        () -> {
-          final long time = System.currentTimeMillis();
-          final ConsumerRecords<String, byte[]> records = consumer.poll(Duration.ofMillis(500));
-
-          long inputCount = 0;
-          for (final ConsumerRecord<String, byte[]> inputRecord : records
-              .records(kafkaInputTopic)) {
-            inputCount++;
-          }
-
-          long outputCount = 0;
-          final StatsAccumulator statsAccumulator = new StatsAccumulator();
-          for (final ConsumerRecord<String, byte[]> outputRecord : records
-              .records(kafkaOutputTopic)) {
-            outputCount++;
-            final AggregatedActivePowerRecord record =
-                deserializer.deserialize(kafkaOutputTopic, outputRecord.value());
-            final long latency = time - record.getTimestamp();
-            statsAccumulator.add(latency);
-          }
-
-          final double latency = statsAccumulator.count() > 0 ? statsAccumulator.mean() : 0.0;
-
-          final long elapsedTime = System.currentTimeMillis() - time;
-          System.out
-              .println("input," + time + ',' + elapsedTime + ',' + 0 + ',' + inputCount);
-          System.out
-              .println("output," + time + ',' + elapsedTime + ',' + latency + ',' + outputCount);
-        },
-        0,
-        1,
-        TimeUnit.SECONDS);
-  }
-
-}
diff --git a/uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java b/uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java
deleted file mode 100644
index 97a7c84f872f3ab676128d903ae121c376bf7608..0000000000000000000000000000000000000000
--- a/uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java
+++ /dev/null
@@ -1,124 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.kiekerbridge.KafkaRecordSender;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-
-public class LoadGenerator {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
-    final int numNestedGroups =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
-    final int numSensor =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
-    final int periodMs =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-    final int value =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-    final boolean sendRegistry =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-    final int threads =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
-    final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
-    final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
-
-    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-    if (hierarchy.equals("deep")) {
-      MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-      for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-        lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-      }
-      for (int s = 0; s < numSensor; s++) {
-        lastSensor.addChildMachineSensor("sensor_" + s);
-      }
-    } else if (hierarchy.equals("full")) {
-      addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-    } else {
-      throw new IllegalStateException();
-    }
-
-    final List<String> sensors =
-        sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-            .collect(Collectors.toList());
-
-    if (sendRegistry) {
-      final ConfigPublisher configPublisher =
-          new ConfigPublisher(kafkaBootstrapServers, "configuration");
-      configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-      configPublisher.close();
-      System.out.println("Configuration sent.");
-
-      System.out.println("Now wait 30 seconds");
-      Thread.sleep(30_000);
-      System.out.println("And woke up again :)");
-    }
-
-
-    final Properties kafkaProperties = new Properties();
-    // kafkaProperties.put("acks", this.acknowledges);
-    kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
-    kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
-    kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(
-        kafkaBootstrapServers, kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(),
-        kafkaProperties);
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-    final Random random = new Random();
-
-    for (final String sensor : sensors) {
-      final int initialDelay = random.nextInt(periodMs);
-      executor.scheduleAtFixedRate(
-          () -> {
-            kafkaRecordSender.write(new ActivePowerRecord(
-                sensor,
-                System.currentTimeMillis(),
-                value));
-          },
-          initialDelay,
-          periodMs,
-          TimeUnit.MILLISECONDS);
-    }
-
-    System.out.println("Wait for termination...");
-    executor.awaitTermination(30, TimeUnit.DAYS);
-    System.out.println("Will terminate now");
-
-  }
-
-  private static int addChildren(final MutableAggregatedSensor parent, final int numChildren,
-      final int lvl, final int maxLvl, int nextId) {
-    for (int c = 0; c < numChildren; c++) {
-      if (lvl == maxLvl) {
-        parent.addChildMachineSensor("s_" + nextId);
-        nextId++;
-      } else {
-        final MutableAggregatedSensor newParent =
-            parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
-        nextId++;
-        nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
-      }
-    }
-    return nextId;
-  }
-
-}
diff --git a/uc1-application/src/main/java/uc1/application/ConfigurationKeys.java b/uc1-application/src/main/java/uc1/application/ConfigurationKeys.java
new file mode 100644
index 0000000000000000000000000000000000000000..27bf70b96364fd58bd8a8df59af6e8f38fcc9b29
--- /dev/null
+++ b/uc1-application/src/main/java/uc1/application/ConfigurationKeys.java
@@ -0,0 +1,23 @@
+package uc1.application;
+
+/**
+ * Keys to access configuration parameters.
+ */
+public final class ConfigurationKeys {
+
+	public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
+
+	public static final String KAFKA_OUTPUT_TOPIC = "kafka.output.topic";
+
+	public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
+
+	public static final String NUM_THREADS = "num.threads";
+
+	public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
+
+	public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
+
+	private ConfigurationKeys() {
+	}
+
+}
diff --git a/uc1-application/src/main/java/uc1/application/HistoryService.java b/uc1-application/src/main/java/uc1/application/HistoryService.java
new file mode 100644
index 0000000000000000000000000000000000000000..23b1e4b84877221ae80b5c406fffe07c0cabb90c
--- /dev/null
+++ b/uc1-application/src/main/java/uc1/application/HistoryService.java
@@ -0,0 +1,46 @@
+package uc1.application;
+
+import java.util.concurrent.CompletableFuture;
+import org.apache.commons.configuration2.Configuration;
+import org.apache.kafka.streams.KafkaStreams;
+import titan.ccp.common.configuration.Configurations;
+import uc1.streamprocessing.KafkaStreamsBuilder;
+
+/**
+ * A microservice that manages the history, i.e. it stores and aggregates
+ * incoming measurements.
+ */
+public class HistoryService {
+
+	private final Configuration config = Configurations.create();
+
+	private final CompletableFuture<Void> stopEvent = new CompletableFuture<>();
+
+	/**
+	 * Starts the service.
+	 */
+	public void run() {
+		this.createKafkaStreamsApplication();
+	}
+
+	/**
+	 * Builds and starts the underlying Kafka Streams application of the service.
+	 */
+	private void createKafkaStreamsApplication() {
+
+		final KafkaStreams kafkaStreams = new KafkaStreamsBuilder()
+				.bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
+				.inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC))
+				.numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
+				.commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
+				.cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING)).build();
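+		// close the Kafka Streams application when the stop event completes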
+		this.stopEvent.thenRun(kafkaStreams::close);
+		kafkaStreams.start();
+	}
+
+	public static void main(final String[] args) {
+		new HistoryService().run();
+	}
+
+}
diff --git a/uc1-application/src/main/java/uc1/streamprocessing/KafkaStreamsBuilder.java b/uc1-application/src/main/java/uc1/streamprocessing/KafkaStreamsBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..706cf79022b2485b349bfe7ae144145dda013d20
--- /dev/null
+++ b/uc1-application/src/main/java/uc1/streamprocessing/KafkaStreamsBuilder.java
@@ -0,0 +1,93 @@
+package uc1.streamprocessing;
+
+import java.util.Objects;
+import java.util.Properties;
+import org.apache.kafka.streams.KafkaStreams;
+import org.apache.kafka.streams.StreamsConfig;
+import titan.ccp.common.kafka.streams.PropertiesBuilder;
+
+/**
+ * Builder for the Kafka Streams configuration.
+ */
+public class KafkaStreamsBuilder {
+
+  private static final String APPLICATION_NAME = "titan-ccp-history";
+  private static final String APPLICATION_VERSION = "0.0.1";
+
+  // private static final Logger LOGGER = LoggerFactory.getLogger(KafkaStreamsBuilder.class);
+
+  private String bootstrapServers; // NOPMD
+  private String inputTopic; // NOPMD
+  private int numThreads = -1; // NOPMD
+  private int commitIntervalMs = -1; // NOPMD
+  private int cacheMaxBytesBuff = -1; // NOPMD
+
+  public KafkaStreamsBuilder inputTopic(final String inputTopic) {
+    this.inputTopic = inputTopic;
+    return this;
+  }
+
+  public KafkaStreamsBuilder bootstrapServers(final String bootstrapServers) {
+    this.bootstrapServers = bootstrapServers;
+    return this;
+  }
+
+  /**
+   * Sets the Kafka Streams property for the number of threads (num.stream.threads). Can be -1 to
+   * use the default.
+   */
+  public KafkaStreamsBuilder numThreads(final int numThreads) {
+    if (numThreads < -1 || numThreads == 0) {
+      throw new IllegalArgumentException("Number of threads must be greater 0 or -1.");
+    }
+    this.numThreads = numThreads;
+    return this;
+  }
+
+  /**
+   * Sets the Kafka Streams property for the frequency with which to save the position (offsets in
+   * source topics) of tasks (commit.interval.ms). Must be zero when all records should be
+   * processed, for example, when processing records in bulk. Can be -1 to use the default.
+   */
+  public KafkaStreamsBuilder commitIntervalMs(final int commitIntervalMs) {
+    if (commitIntervalMs < -1) {
+      throw new IllegalArgumentException("Commit interval must be greater or equal -1.");
+    }
+    this.commitIntervalMs = commitIntervalMs;
+    return this;
+  }
+
+  /**
+   * Sets the Kafka Streams property for the maximum amount of memory, in bytes, to be used for
+   * record caches across all threads (cache.max.bytes.buffering). Must be zero when all records
+   * should be processed, for example, when processing records in bulk. Can be -1 to use the default.
+   */
+  public KafkaStreamsBuilder cacheMaxBytesBuffering(final int cacheMaxBytesBuffering) {
+    if (cacheMaxBytesBuffering < -1) {
+      throw new IllegalArgumentException("Cache max bytes buffering must be greater or equal -1.");
+    }
+    this.cacheMaxBytesBuff = cacheMaxBytesBuffering;
+    return this;
+  }
+
+  /**
+   * Builds the {@link KafkaStreams} instance.
+   */
+  public KafkaStreams build() {
+    Objects.requireNonNull(this.inputTopic, "Input topic has not been set.");
+    // TODO log parameters
+    final TopologyBuilder topologyBuilder = new TopologyBuilder(
+        this.inputTopic);
+    final Properties properties = PropertiesBuilder
+        .bootstrapServers(this.bootstrapServers)
+        .applicationId(APPLICATION_NAME + '-' + APPLICATION_VERSION) // TODO as parameter
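+        // options left at the -1 sentinel fail the predicate and fall back to the Kafka Streams defaults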
+        .set(StreamsConfig.NUM_STREAM_THREADS_CONFIG, this.numThreads, p -> p > 0)
+        .set(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, this.commitIntervalMs, p -> p >= 0)
+        .set(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, this.cacheMaxBytesBuff, p -> p >= 0)
+        .set(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, "DEBUG")
+        .build();
+    return new KafkaStreams(topologyBuilder.build(), properties);
+  }
+
+}
diff --git a/uc1-application/src/main/java/uc1/streamprocessing/TopologyBuilder.java b/uc1-application/src/main/java/uc1/streamprocessing/TopologyBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..0150045acbb4d85bfb8ea40e786cfe41f35f33f5
--- /dev/null
+++ b/uc1-application/src/main/java/uc1/streamprocessing/TopologyBuilder.java
@@ -0,0 +1,48 @@
+package uc1.streamprocessing;
+
+import com.google.gson.Gson;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.streams.StreamsBuilder;
+import org.apache.kafka.streams.Topology;
+import org.apache.kafka.streams.kstream.Consumed;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
+import titan.ccp.models.records.ActivePowerRecordFactory;
+
+/**
+ * Builds the Kafka Streams {@link Topology} for the History microservice.
+ */
+public class TopologyBuilder {
+
+	private static final Logger LOGGER = LoggerFactory.getLogger(TopologyBuilder.class);
+
+	private final String inputTopic;
+	private final Gson gson;
+
+	private final StreamsBuilder builder = new StreamsBuilder();
+
+	/**
+	 * Creates a new {@link TopologyBuilder} for the given input topic.
+	 */
+	public TopologyBuilder(final String inputTopic) {
+		this.inputTopic = inputTopic;
+		this.gson = new Gson();
+	}
+
+	/**
+	 * Builds the {@link Topology} for the History microservice.
+	 */
+	public Topology build() {
+
+		this.builder
+				// read the incoming measurements from the input topic
+				.stream(this.inputTopic,
+						Consumed.with(Serdes.String(), IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
+				// serialize each record to JSON and log it
+				.mapValues(v -> this.gson.toJson(v))
+				.foreach((k, v) -> LOGGER.info("Key: " + k + " Value: " + v));
+
+		return this.builder.build();
+	}
+}
diff --git a/uc1-application/src/main/resources/META-INF/application.properties b/uc1-application/src/main/resources/META-INF/application.properties
new file mode 100644
index 0000000000000000000000000000000000000000..d2002fd1c8841368d47017b2ce7939bfc42877aa
--- /dev/null
+++ b/uc1-application/src/main/resources/META-INF/application.properties
@@ -0,0 +1,7 @@
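+# defaults; the Docker images are expected to override these via environment variables (e.g. KAFKA_BOOTSTRAP_SERVERS)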
+kafka.bootstrap.servers=localhost:9092
+kafka.input.topic=input
+kafka.output.topic=output
+num.threads=1
+commit.interval.ms=10
+cache.max.bytes.buffering=-1
diff --git a/uc1-workload-generator/Dockerfile b/uc1-workload-generator/Dockerfile
index 9b17de3af09feb1af8d14fd277d88c8c2797142e..91f18d740fa87d7b03480a3352a1fa0eccc845db 100644
--- a/uc1-workload-generator/Dockerfile
+++ b/uc1-workload-generator/Dockerfile
@@ -1,6 +1,6 @@
 FROM openjdk:11-slim
 
-ADD build/distributions/exp-bigdata19-bridge.tar /
+ADD build/distributions/uc1-workload-generator.tar /
 
-CMD export JAVA_OPTS=-Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL \
-    && /exp-bigdata19-bridge/bin/exp-bigdata19-bridge
\ No newline at end of file
+CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
+     /uc1-workload-generator/bin/uc1-workload-generator
\ No newline at end of file
diff --git a/uc1-workload-generator/build.gradle b/uc1-workload-generator/build.gradle
index 12e597b37f775a7ad48a7e6009ed075213e1712b..824566a24158d5f535e0dec8ef948903738c9100 100644
--- a/uc1-workload-generator/build.gradle
+++ b/uc1-workload-generator/build.gradle
@@ -21,7 +21,7 @@ dependencies {
     testCompile 'junit:junit:4.12'
 }
 
-mainClassName = "titan.ccp.kiekerbridge.expbigdata19.ExperimentorBigData"
+mainClassName = "uc1.workloadGenerator.LoadGenerator"
 
 eclipse {
     classpath {
diff --git a/uc1-application/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java b/uc1-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
similarity index 98%
rename from uc1-application/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java
rename to uc1-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
index b46128c8ebfd52aeeb7127777bb6530761f35181..6c67cf722b4dce87f0bc197ba80f8f117f82198e 100644
--- a/uc1-application/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java
+++ b/uc1-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
@@ -1,4 +1,4 @@
-package titan.ccp.kiekerbridge;
+package kafkaSender;
 
 import java.util.Properties;
 import java.util.function.Function;
diff --git a/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java b/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java
deleted file mode 100644
index d5f55a4ab7ca265b241e880363975070e9952c45..0000000000000000000000000000000000000000
--- a/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java
+++ /dev/null
@@ -1,50 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import java.util.Properties;
-import java.util.concurrent.ExecutionException;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.configuration.events.EventSerde;
-
-public class ConfigPublisher {
-
-  private final String topic;
-
-  private final Producer<Event, String> producer;
-
-  public ConfigPublisher(final String bootstrapServers, final String topic) {
-    this(bootstrapServers, topic, new Properties());
-  }
-
-  public ConfigPublisher(final String bootstrapServers, final String topic,
-      final Properties defaultProperties) {
-    this.topic = topic;
-
-    final Properties properties = new Properties();
-    properties.putAll(defaultProperties);
-    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-    properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "134217728"); // 128 MB
-    properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "134217728"); // 128 MB
-
-    this.producer =
-        new KafkaProducer<>(properties, EventSerde.serializer(), new StringSerializer());
-  }
-
-  public void publish(final Event event, final String value) {
-    final ProducerRecord<Event, String> record = new ProducerRecord<>(this.topic, event, value);
-    try {
-      this.producer.send(record).get();
-    } catch (InterruptedException | ExecutionException e) {
-      throw new IllegalArgumentException(e);
-    }
-  }
-
-  public void close() {
-    this.producer.close();
-  }
-
-}
diff --git a/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java b/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java
deleted file mode 100644
index a50bbd942fccf5f8899414fe8cb7b82ad6953f87..0000000000000000000000000000000000000000
--- a/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import java.io.IOException;
-import java.util.Objects;
-
-public class ExperimentorBigData {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String modus = Objects.requireNonNullElse(System.getenv("MODUS"), "LoadCounter");
-
-    if (modus.equals("LoadGenerator")) {
-      LoadGenerator.main(args);
-    } else if (modus.equals("LoadGeneratorExtrem")) {
-      LoadGeneratorExtrem.main(args);
-    } else if (modus.equals("LoadCounter")) {
-      LoadCounter.main(args);
-    }
-
-  }
-}
diff --git a/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java b/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java
deleted file mode 100644
index 798f3014446605afab2cf20f3232896baab02802..0000000000000000000000000000000000000000
--- a/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import com.google.common.math.StatsAccumulator;
-import java.time.Duration;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.serialization.Deserializer;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-import titan.ccp.models.records.AggregatedActivePowerRecord;
-import titan.ccp.models.records.AggregatedActivePowerRecordFactory;
-
-public class LoadCounter {
-
-  public static void main(final String[] args) throws InterruptedException {
-
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaOutputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_OUTPUT_TOPIC"), "output");
-
-    final Properties props = new Properties();
-    props.setProperty("bootstrap.servers", kafkaBootstrapServers);
-    props.setProperty("group.id", "load-counter");
-    props.setProperty("enable.auto.commit", "true");
-    props.setProperty("auto.commit.interval.ms", "1000");
-    props.setProperty("max.poll.records", "1000000");
-    props.setProperty("max.partition.fetch.bytes", "134217728"); // 128 MB
-    props.setProperty("key.deserializer",
-        "org.apache.kafka.common.serialization.StringDeserializer");
-    props.setProperty("value.deserializer",
-        "org.apache.kafka.common.serialization.ByteArrayDeserializer");
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
-    final Deserializer<AggregatedActivePowerRecord> deserializer =
-        IMonitoringRecordSerde.deserializer(new AggregatedActivePowerRecordFactory());
-
-    final KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props);
-    consumer.subscribe(List.of(kafkaInputTopic, kafkaOutputTopic));
-
-    executor.scheduleAtFixedRate(
-        () -> {
-          final long time = System.currentTimeMillis();
-          final ConsumerRecords<String, byte[]> records = consumer.poll(Duration.ofMillis(500));
-
-          long inputCount = 0;
-          for (final ConsumerRecord<String, byte[]> inputRecord : records
-              .records(kafkaInputTopic)) {
-            inputCount++;
-          }
-
-          long outputCount = 0;
-          final StatsAccumulator statsAccumulator = new StatsAccumulator();
-          for (final ConsumerRecord<String, byte[]> outputRecord : records
-              .records(kafkaOutputTopic)) {
-            outputCount++;
-            final AggregatedActivePowerRecord record =
-                deserializer.deserialize(kafkaOutputTopic, outputRecord.value());
-            final long latency = time - record.getTimestamp();
-            statsAccumulator.add(latency);
-          }
-
-          final double latency = statsAccumulator.count() > 0 ? statsAccumulator.mean() : 0.0;
-
-          final long elapsedTime = System.currentTimeMillis() - time;
-          System.out
-              .println("input," + time + ',' + elapsedTime + ',' + 0 + ',' + inputCount);
-          System.out
-              .println("output," + time + ',' + elapsedTime + ',' + latency + ',' + outputCount);
-        },
-        0,
-        1,
-        TimeUnit.SECONDS);
-  }
-
-}
diff --git a/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java b/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java
deleted file mode 100644
index 97a7c84f872f3ab676128d903ae121c376bf7608..0000000000000000000000000000000000000000
--- a/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java
+++ /dev/null
@@ -1,124 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.kiekerbridge.KafkaRecordSender;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-
-public class LoadGenerator {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
-    final int numNestedGroups =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
-    final int numSensor =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
-    final int periodMs =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-    final int value =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-    final boolean sendRegistry =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-    final int threads =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
-    final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
-    final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
-
-    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-    if (hierarchy.equals("deep")) {
-      MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-      for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-        lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-      }
-      for (int s = 0; s < numSensor; s++) {
-        lastSensor.addChildMachineSensor("sensor_" + s);
-      }
-    } else if (hierarchy.equals("full")) {
-      addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-    } else {
-      throw new IllegalStateException();
-    }
-
-    final List<String> sensors =
-        sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-            .collect(Collectors.toList());
-
-    if (sendRegistry) {
-      final ConfigPublisher configPublisher =
-          new ConfigPublisher(kafkaBootstrapServers, "configuration");
-      configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-      configPublisher.close();
-      System.out.println("Configuration sent.");
-
-      System.out.println("Now wait 30 seconds");
-      Thread.sleep(30_000);
-      System.out.println("And woke up again :)");
-    }
-
-
-    final Properties kafkaProperties = new Properties();
-    // kafkaProperties.put("acks", this.acknowledges);
-    kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
-    kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
-    kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(
-        kafkaBootstrapServers, kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(),
-        kafkaProperties);
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-    final Random random = new Random();
-
-    for (final String sensor : sensors) {
-      final int initialDelay = random.nextInt(periodMs);
-      executor.scheduleAtFixedRate(
-          () -> {
-            kafkaRecordSender.write(new ActivePowerRecord(
-                sensor,
-                System.currentTimeMillis(),
-                value));
-          },
-          initialDelay,
-          periodMs,
-          TimeUnit.MILLISECONDS);
-    }
-
-    System.out.println("Wait for termination...");
-    executor.awaitTermination(30, TimeUnit.DAYS);
-    System.out.println("Will terminate now");
-
-  }
-
-  private static int addChildren(final MutableAggregatedSensor parent, final int numChildren,
-      final int lvl, final int maxLvl, int nextId) {
-    for (int c = 0; c < numChildren; c++) {
-      if (lvl == maxLvl) {
-        parent.addChildMachineSensor("s_" + nextId);
-        nextId++;
-      } else {
-        final MutableAggregatedSensor newParent =
-            parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
-        nextId++;
-        nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
-      }
-    }
-    return nextId;
-  }
-
-}
diff --git a/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java b/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java
deleted file mode 100644
index 5bfb6ad488e90f39ded2b9e4cb57d10099f1c538..0000000000000000000000000000000000000000
--- a/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java
+++ /dev/null
@@ -1,165 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadMXBean;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.kiekerbridge.KafkaRecordSender;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.model.sensorregistry.SensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-
-public class LoadGeneratorExtrem {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
-    final int numNestedGroups =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
-    final int numSensor =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
-    final int value =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-    final boolean sendRegistry =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-    final boolean doNothing =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("DO_NOTHING"), "false"));
-    final int threads =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-    final int producers =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("PRODUCERS"), "1"));
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
-    final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
-    final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
-
-    final SensorRegistry sensorRegistry =
-        buildSensorRegistry(hierarchy, numNestedGroups, numSensor);
-
-    if (sendRegistry) {
-      final ConfigPublisher configPublisher =
-          new ConfigPublisher(kafkaBootstrapServers, "configuration");
-      configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-      configPublisher.close();
-      System.out.println("Configuration sent.");
-
-      System.out.println("Now wait 30 seconds");
-      Thread.sleep(30_000);
-      System.out.println("And woke up again :)");
-    }
-
-    final Properties kafkaProperties = new Properties();
-    // kafkaProperties.put("acks", this.acknowledges);
-    kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
-    kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
-    kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final List<KafkaRecordSender<ActivePowerRecord>> kafkaRecordSenders = Stream
-        .<KafkaRecordSender<ActivePowerRecord>>generate(
-            () -> new KafkaRecordSender<>(
-                kafkaBootstrapServers,
-                kafkaInputTopic,
-                r -> r.getIdentifier(),
-                r -> r.getTimestamp(),
-                kafkaProperties))
-        .limit(producers)
-        .collect(Collectors.toList());
-
-    final List<String> sensors =
-        sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-            .collect(Collectors.toList());
-
-    for (int i = 0; i < threads; i++) {
-      final int threadId = i;
-      new Thread(() -> {
-        while (true) {
-          for (final String sensor : sensors) {
-            if (!doNothing) {
-              kafkaRecordSenders.get(threadId % producers).write(new ActivePowerRecord(
-                  sensor,
-                  System.currentTimeMillis(),
-                  value));
-            }
-          }
-        }
-      }).start();
-    }
-
-    while (true) {
-      printCpuUsagePerThread();
-    }
-
-    // System.out.println("Wait for termination...");
-    // Thread.sleep(30 * 24 * 60 * 60 * 1000L);
-    // System.out.println("Will terminate now");
-  }
-
-  private static void printCpuUsagePerThread() throws InterruptedException {
-    final ThreadMXBean tmxb = ManagementFactory.getThreadMXBean();
-    final List<Thread> threads = new ArrayList<>(Thread.getAllStackTraces().keySet());
-
-    final long start = System.nanoTime();
-    final long[] startCpuTimes = new long[threads.size()];
-    for (int i = 0; i < threads.size(); i++) {
-      final Thread thread = threads.get(i);
-      startCpuTimes[i] = tmxb.getThreadCpuTime(thread.getId());
-    }
-
-    Thread.sleep(5000);
-
-    for (int i = 0; i < threads.size(); i++) {
-      final Thread thread = threads.get(i);
-      final long cpuTime = tmxb.getThreadCpuTime(thread.getId()) - startCpuTimes[i];
-      final long dur = System.nanoTime() - start;
-      final double util = (double) cpuTime / dur;
-      System.out.println(
-          "Thread " + thread.getName() + ": " + String.format(java.util.Locale.US, "%.4f", util));
-    }
-  }
-
-  private static SensorRegistry buildSensorRegistry(final String hierarchy,
-      final int numNestedGroups, final int numSensor) {
-    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-    if (hierarchy.equals("deep")) {
-      MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-      for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-        lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-      }
-      for (int s = 0; s < numSensor; s++) {
-        lastSensor.addChildMachineSensor("sensor_" + s);
-      }
-    } else if (hierarchy.equals("full")) {
-      addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-    } else {
-      throw new IllegalStateException();
-    }
-    return sensorRegistry;
-  }
-
-  private static int addChildren(final MutableAggregatedSensor parent, final int numChildren,
-      final int lvl, final int maxLvl, int nextId) {
-    for (int c = 0; c < numChildren; c++) {
-      if (lvl == maxLvl) {
-        parent.addChildMachineSensor("s_" + nextId);
-        nextId++;
-      } else {
-        final MutableAggregatedSensor newParent =
-            parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
-        nextId++;
-        nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
-      }
-    }
-    return nextId;
-  }
-
-}
diff --git a/uc1-workload-generator/src/main/java/uc1/workloadGenerator/ConfigPublisher.java b/uc1-workload-generator/src/main/java/uc1/workloadGenerator/ConfigPublisher.java
new file mode 100644
index 0000000000000000000000000000000000000000..d0201b4dadeb8955c6505f95f2d6d333d427bd5b
--- /dev/null
+++ b/uc1-workload-generator/src/main/java/uc1/workloadGenerator/ConfigPublisher.java
@@ -0,0 +1,50 @@
+package uc1.workloadGenerator;
+
+import java.util.Properties;
+import java.util.concurrent.ExecutionException;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.serialization.StringSerializer;
+import titan.ccp.configuration.events.Event;
+import titan.ccp.configuration.events.EventSerde;
+
+public class ConfigPublisher {
+
+	private final String topic;
+
+	private final Producer<Event, String> producer;
+
+	public ConfigPublisher(final String bootstrapServers, final String topic) {
+		this(bootstrapServers, topic, new Properties());
+	}
+
+	public ConfigPublisher(final String bootstrapServers, final String topic, final Properties defaultProperties) {
+		this.topic = topic;
+
+		final Properties properties = new Properties();
+		properties.putAll(defaultProperties);
+		properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
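+		// generous request and buffer sizes so that large payloads, e.g. the serialized sensor registry, fit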
+		properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "134217728"); // 128 MB
+		properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "134217728"); // 128 MB
+
+		this.producer = new KafkaProducer<>(properties, EventSerde.serializer(), new StringSerializer());
+	}
+
+	public void publish(final Event event, final String value) {
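+		// synchronous send: get() blocks until the broker has acknowledged the record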
+		final ProducerRecord<Event, String> record = new ProducerRecord<>(this.topic, event, value);
+		try {
+			this.producer.send(record).get();
+		} catch (InterruptedException | ExecutionException e) {
+			throw new IllegalArgumentException(e);
+		}
+	}
+
+	public void close() {
+		this.producer.close();
+	}
+
+}
diff --git a/uc1-workload-generator/src/main/java/uc1/workloadGenerator/LoadGenerator.java b/uc1-workload-generator/src/main/java/uc1/workloadGenerator/LoadGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..f75f8018b3f32cfe003b09c0f2481fdd56dc8be3
--- /dev/null
+++ b/uc1-workload-generator/src/main/java/uc1/workloadGenerator/LoadGenerator.java
@@ -0,0 +1,89 @@
+package uc1.workloadGenerator;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import kafkaSender.KafkaRecordSender;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import titan.ccp.configuration.events.Event;
+import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
+import titan.ccp.model.sensorregistry.MutableSensorRegistry;
+import titan.ccp.models.records.ActivePowerRecord;
+
+public class LoadGenerator {
+
+	public static void main(final String[] args) throws InterruptedException, IOException {
+		// uc1
+
+		final int numSensor = Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10"));
+		final int periodMs = Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
+		final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+		final boolean sendRegistry = Boolean
+				.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
+		final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
+		final String kafkaBootstrapServers = Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
+				"localhost:9092");
+		final String kafkaInputTopic = Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
+		final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
+		final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
+		final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+
+		// create sensorRegistry
+		final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
+		addChildrens(sensorRegistry.getTopLevelSensor(), numSensor, 0);
+
+		final List<String> sensors = sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
+				.collect(Collectors.toList());
+
+		// TODO Do we need this?
+		if (sendRegistry) {
+			final ConfigPublisher configPublisher = new ConfigPublisher(kafkaBootstrapServers, "configuration");
+			configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
+			configPublisher.close();
+			System.out.println("Configuration sent.");
+
+			System.out.println("Now wait 30 seconds");
+			Thread.sleep(30_000);
+			System.out.println("And woke up again :)");
+		}
+
+		final Properties kafkaProperties = new Properties();
+		// kafkaProperties.put("acks", this.acknowledges);
+		kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
+		kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
+		kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
+		final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(kafkaBootstrapServers,
+				kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
+
+		final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
+		final Random random = new Random();
+
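+		// schedule one periodic task per sensor; the random initial delay spreads the records over the period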
+		for (final String sensor : sensors) {
+			final int initialDelay = random.nextInt(periodMs);
+			executor.scheduleAtFixedRate(() -> {
+				kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
+			}, initialDelay, periodMs, TimeUnit.MILLISECONDS);
+		}
+
+		System.out.println("Wait for termination...");
+		executor.awaitTermination(30, TimeUnit.DAYS);
+		System.out.println("Will terminate now");
+
+	}
+
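+	// registers numChildren machine sensors as direct children of the given parent sensor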
+	private static void addChildrens(final MutableAggregatedSensor parent, final int numChildren, int nextId) {
+		for (int c = 0; c < numChildren; c++) {
+			parent.addChildMachineSensor("s_" + nextId);
+			nextId++;
+		}
+	}
+
+}
diff --git a/uc1-workload-generator/src/main/java/uc1/workloadGenerator/LoadGeneratorExtrem.java b/uc1-workload-generator/src/main/java/uc1/workloadGenerator/LoadGeneratorExtrem.java
new file mode 100644
index 0000000000000000000000000000000000000000..1670778fd0136f7f1386390776384faeb8594712
--- /dev/null
+++ b/uc1-workload-generator/src/main/java/uc1/workloadGenerator/LoadGeneratorExtrem.java
@@ -0,0 +1,148 @@
+package uc1.workloadGenerator;
+
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.ThreadMXBean;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import kafkaSender.KafkaRecordSender;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import titan.ccp.configuration.events.Event;
+import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
+import titan.ccp.model.sensorregistry.MutableSensorRegistry;
+import titan.ccp.model.sensorregistry.SensorRegistry;
+import titan.ccp.models.records.ActivePowerRecord;
+
+public class LoadGeneratorExtrem {
+
+	public static void main(final String[] args) throws InterruptedException, IOException {
+
+		final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
+		final int numNestedGroups = Integer
+				.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
+		final int numSensor = Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
+		final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+		final boolean sendRegistry = Boolean
+				.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
+		final boolean doNothing = Boolean
+				.parseBoolean(Objects.requireNonNullElse(System.getenv("DO_NOTHING"), "false"));
+		final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
+		final int producers = Integer.parseInt(Objects.requireNonNullElse(System.getenv("PRODUCERS"), "1"));
+		final String kafkaBootstrapServers = Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
+				"localhost:9092");
+		final String kafkaInputTopic = Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
+		final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
+		final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
+		final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+
+		final SensorRegistry sensorRegistry = buildSensorRegistry(hierarchy, numNestedGroups, numSensor);
+
+		if (sendRegistry) {
+			final ConfigPublisher configPublisher = new ConfigPublisher(kafkaBootstrapServers, "configuration");
+			configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
+			configPublisher.close();
+			System.out.println("Configuration sent.");
+
+			System.out.println("Now wait 30 seconds");
+			Thread.sleep(30_000);
+			System.out.println("And woke up again :)");
+		}
+
+		final Properties kafkaProperties = new Properties();
+		// kafkaProperties.put("acks", this.acknowledges);
+		kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
+		kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
+		kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
+		final List<KafkaRecordSender<ActivePowerRecord>> kafkaRecordSenders = Stream
+				.<KafkaRecordSender<ActivePowerRecord>>generate(() -> new KafkaRecordSender<>(kafkaBootstrapServers,
+						kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties))
+				.limit(producers).collect(Collectors.toList());
+
+		final List<String> sensors = sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
+				.collect(Collectors.toList());
+
+		for (int i = 0; i < threads; i++) {
+			final int threadId = i;
+			new Thread(() -> {
+				while (true) {
+					for (final String sensor : sensors) {
+						if (!doNothing) {
+							kafkaRecordSenders.get(threadId % producers)
+									.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
+						}
+					}
+				}
+			}).start();
+		}
+
+		while (true) {
+			printCpuUsagePerThread();
+		}
+
+		// System.out.println("Wait for termination...");
+		// Thread.sleep(30 * 24 * 60 * 60 * 1000L);
+		// System.out.println("Will terminate now");
+	}
+
+	private static void printCpuUsagePerThread() throws InterruptedException {
+		final ThreadMXBean tmxb = ManagementFactory.getThreadMXBean();
+		final List<Thread> threads = new ArrayList<>(Thread.getAllStackTraces().keySet());
+
+		final long start = System.nanoTime();
+		final long[] startCpuTimes = new long[threads.size()];
+		for (int i = 0; i < threads.size(); i++) {
+			final Thread thread = threads.get(i);
+			startCpuTimes[i] = tmxb.getThreadCpuTime(thread.getId());
+		}
+
+		Thread.sleep(5000);
+
+		for (int i = 0; i < threads.size(); i++) {
+			final Thread thread = threads.get(i);
+			final long cpuTime = tmxb.getThreadCpuTime(thread.getId()) - startCpuTimes[i];
+			final long dur = System.nanoTime() - start;
+			final double util = (double) cpuTime / dur;
+			System.out.println("Thread " + thread.getName() + ": " + String.format(java.util.Locale.US, "%.4f", util));
+		}
+	}
+
+	private static SensorRegistry buildSensorRegistry(final String hierarchy, final int numNestedGroups,
+			final int numSensor) {
+		final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
+		if (hierarchy.equals("deep")) {
+			MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
+			for (int lvl = 1; lvl < numNestedGroups; lvl++) {
+				lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
+			}
+			for (int s = 0; s < numSensor; s++) {
+				lastSensor.addChildMachineSensor("sensor_" + s);
+			}
+		} else if (hierarchy.equals("full")) {
+			addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
+		} else {
+			throw new IllegalStateException("Unsupported hierarchy type: " + hierarchy);
+		}
+		return sensorRegistry;
+	}
+
+	private static int addChildren(final MutableAggregatedSensor parent, final int numChildren, final int lvl,
+			final int maxLvl, int nextId) {
+		for (int c = 0; c < numChildren; c++) {
+			if (lvl == maxLvl) {
+				parent.addChildMachineSensor("s_" + nextId);
+				nextId++;
+			} else {
+				final MutableAggregatedSensor newParent = parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
+				nextId++;
+				nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
+			}
+		}
+		return nextId;
+	}
+
+}
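
printCpuUsagePerThread estimates per-thread utilization by sampling ThreadMXBean.getThreadCpuTime twice and dividing the consumed CPU time by the elapsed wall-clock time. A reduced sketch of the same measurement for a single worker thread; note that getThreadCpuTime can return -1 where thread CPU time measurement is unsupported or disabled, which this sketch, like the original, does not guard against:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.util.Locale;

public class CpuPerThreadSketch {

	public static void main(final String[] args) throws InterruptedException {
		final ThreadMXBean tmxb = ManagementFactory.getThreadMXBean();

		// Burn CPU in a background thread so there is something to measure.
		final Thread worker = new Thread(() -> {
			long x = 0;
			while (!Thread.currentThread().isInterrupted()) {
				x += System.nanoTime() % 7;
			}
		}, "worker");
		worker.setDaemon(true);
		worker.start();

		final long startWall = System.nanoTime();
		final long startCpu = tmxb.getThreadCpuTime(worker.getId());

		Thread.sleep(1000);

		// Utilization = CPU time consumed / wall-clock time elapsed.
		final long cpu = tmxb.getThreadCpuTime(worker.getId()) - startCpu;
		final long wall = System.nanoTime() - startWall;
		System.out.printf(Locale.US, "%s: %.4f%n", worker.getName(), (double) cpu / wall);
	}
}
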
diff --git a/uc2-application/Dockerfile b/uc2-application/Dockerfile
index 9b17de3af09feb1af8d14fd277d88c8c2797142e..99076645ab5e1c3b1a77d2aec7408dc8846f9f51 100644
--- a/uc2-application/Dockerfile
+++ b/uc2-application/Dockerfile
@@ -1,6 +1,6 @@
 FROM openjdk:11-slim
 
-ADD build/distributions/exp-bigdata19-bridge.tar /
+ADD build/distributions/uc2-application.tar /
 
-CMD export JAVA_OPTS=-Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL \
-    && /exp-bigdata19-bridge/bin/exp-bigdata19-bridge
\ No newline at end of file
+CMD   JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
+     /uc2-application/bin/uc2-application
\ No newline at end of file
diff --git a/uc2-application/build.gradle b/uc2-application/build.gradle
index 12e597b37f775a7ad48a7e6009ed075213e1712b..42bc3e0770db50e93bf2a08d5c039677489c4492 100644
--- a/uc2-application/build.gradle
+++ b/uc2-application/build.gradle
@@ -21,7 +21,7 @@ dependencies {
     testCompile 'junit:junit:4.12'
 }
 
-mainClassName = "titan.ccp.kiekerbridge.expbigdata19.ExperimentorBigData"
+mainClassName = "uc2.application.AggregationService"
 
 eclipse {
     classpath {
diff --git a/uc2-application/src/main/java/titan/ccp/aggregation/AggregationService.java b/uc2-application/src/main/java/titan/ccp/aggregation/AggregationService.java
deleted file mode 100644
index 0058e939bc5cc50dafe15aaf011bfcc9aa47925e..0000000000000000000000000000000000000000
--- a/uc2-application/src/main/java/titan/ccp/aggregation/AggregationService.java
+++ /dev/null
@@ -1,62 +0,0 @@
-package titan.ccp.aggregation;
-
-import java.time.Duration;
-import java.util.concurrent.CompletableFuture;
-import org.apache.commons.configuration2.Configuration;
-import org.apache.kafka.streams.KafkaStreams;
-import titan.ccp.aggregation.streamprocessing.KafkaStreamsBuilder;
-import titan.ccp.common.configuration.Configurations;
-
-/**
- * A microservice that manages the history and, therefore, stores and aggregates incoming
- * measurements.
- *
- */
-public class AggregationService {
-
-  private final Configuration config = Configurations.create();
-
-  private final CompletableFuture<Void> stopEvent = new CompletableFuture<>();
-
-
-  /**
-   * Start the service.
-   */
-  public void run() {
-    this.createKafkaStreamsApplication();
-  }
-
-  public static void main(final String[] args) {
-    new AggregationService().run();
-  }
-
-
-  /**
-   * Build and start the underlying Kafka Streams Application of the service.
-   *
-   * @param clusterSession the database session which the application should use.
-   */
-  private void createKafkaStreamsApplication() {
-    final KafkaStreams kafkaStreams = new KafkaStreamsBuilder()
-        .bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
-        .inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC))
-        .outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC))
-        .configurationTopic(this.config.getString(ConfigurationKeys.CONFIGURATION_KAFKA_TOPIC))
-        .windowSize(Duration.ofMillis(this.config.getLong(ConfigurationKeys.WINDOW_SIZE_MS)))
-        .gracePeriod(Duration.ofMillis(this.config.getLong(ConfigurationKeys.WINDOW_GRACE_MS)))
-        .numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
-        .commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
-        .cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING))
-        .build();
-    this.stopEvent.thenRun(kafkaStreams::close);
-    kafkaStreams.start();
-  }
-
-  /**
-   * Stop the service.
-   */
-  public void stop() {
-    this.stopEvent.complete(null);
-  }
-
-}
diff --git a/uc2-application/src/main/java/uc2/application/AggregationService.java b/uc2-application/src/main/java/uc2/application/AggregationService.java
new file mode 100644
index 0000000000000000000000000000000000000000..696b13f4889a988282467aca3e4241938e636d7c
--- /dev/null
+++ b/uc2-application/src/main/java/uc2/application/AggregationService.java
@@ -0,0 +1,59 @@
+package uc2.application;
+
+import java.time.Duration;
+import java.util.concurrent.CompletableFuture;
+import org.apache.commons.configuration2.Configuration;
+import org.apache.kafka.streams.KafkaStreams;
+import titan.ccp.common.configuration.Configurations;
+import uc2.streamprocessing.KafkaStreamsBuilder;
+
+/**
+ * A microservice that aggregates incoming measurements along the sensor
+ * hierarchy provided via the configuration topic.
+ *
+ */
+public class AggregationService {
+
+	private final Configuration config = Configurations.create();
+
+	private final CompletableFuture<Void> stopEvent = new CompletableFuture<>();
+
+	/**
+	 * Start the service.
+	 */
+	public void run() {
+		this.createKafkaStreamsApplication();
+	}
+
+	public static void main(final String[] args) {
+		new AggregationService().run();
+	}
+
+	/**
+	 * Build and start the underlying Kafka Streams application of the service.
+	 */
+	private void createKafkaStreamsApplication() {
+		final KafkaStreams kafkaStreams = new KafkaStreamsBuilder()
+				.bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
+				.inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC))
+				.outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC))
+				.configurationTopic(this.config.getString(ConfigurationKeys.CONFIGURATION_KAFKA_TOPIC))
+				.windowSize(Duration.ofMillis(this.config.getLong(ConfigurationKeys.WINDOW_SIZE_MS)))
+				.gracePeriod(Duration.ofMillis(this.config.getLong(ConfigurationKeys.WINDOW_GRACE_MS)))
+				.numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
+				.commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
+				.cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING)).build();
+		this.stopEvent.thenRun(kafkaStreams::close);
+		kafkaStreams.start();
+	}
+
+	/**
+	 * Stop the service.
+	 */
+	public void stop() {
+		this.stopEvent.complete(null);
+	}
+
+}
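
Shutdown is wired through a CompletableFuture: createKafkaStreamsApplication registers kafkaStreams::close as a dependent action on stopEvent, and stop() simply completes the future, which triggers the close exactly once. A minimal sketch of this pattern, with a println standing in for the streams shutdown:

import java.util.concurrent.CompletableFuture;

public class StopEventSketch {

	private final CompletableFuture<Void> stopEvent = new CompletableFuture<>();

	public void run() {
		// Register the shutdown action; it runs once stopEvent completes.
		this.stopEvent.thenRun(() -> System.out.println("closing streams..."));
		System.out.println("service running");
	}

	public void stop() {
		this.stopEvent.complete(null); // idempotent: later calls have no effect
	}

	public static void main(final String[] args) {
		final StopEventSketch service = new StopEventSketch();
		service.run();
		service.stop();
	}
}
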
diff --git a/uc2-application/src/main/java/titan/ccp/aggregation/ConfigurationKeys.java b/uc2-application/src/main/java/uc2/application/ConfigurationKeys.java
similarity index 96%
rename from uc2-application/src/main/java/titan/ccp/aggregation/ConfigurationKeys.java
rename to uc2-application/src/main/java/uc2/application/ConfigurationKeys.java
index fd6286fea3949137304c280666c9edccbb2554d4..08d5e1eb26535b91462a2954e57037f20e3d62e9 100644
--- a/uc2-application/src/main/java/titan/ccp/aggregation/ConfigurationKeys.java
+++ b/uc2-application/src/main/java/uc2/application/ConfigurationKeys.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation;
+package uc2.application;
 
 /**
  * Keys to access configuration parameters.
diff --git a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/ChildParentsTransformer.java b/uc2-application/src/main/java/uc2/streamprocessing/ChildParentsTransformer.java
similarity index 98%
rename from uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/ChildParentsTransformer.java
rename to uc2-application/src/main/java/uc2/streamprocessing/ChildParentsTransformer.java
index 8422a5c937ec3ac003a96776b4aaddfe3bbe1fff..4315aad5bc211d9342ee1703ead357d0786a2e0e 100644
--- a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/ChildParentsTransformer.java
+++ b/uc2-application/src/main/java/uc2/streamprocessing/ChildParentsTransformer.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation.streamprocessing;
+package uc2.streamprocessing;
 
 import java.util.Map;
 import java.util.Optional;
diff --git a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/ChildParentsTransformerFactory.java b/uc2-application/src/main/java/uc2/streamprocessing/ChildParentsTransformerFactory.java
similarity index 97%
rename from uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/ChildParentsTransformerFactory.java
rename to uc2-application/src/main/java/uc2/streamprocessing/ChildParentsTransformerFactory.java
index 942858ec77cadbd981fdd538306ea3192e875a44..5029c02446b0b191edf0cc498165465d30516504 100644
--- a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/ChildParentsTransformerFactory.java
+++ b/uc2-application/src/main/java/uc2/streamprocessing/ChildParentsTransformerFactory.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation.streamprocessing;
+package uc2.streamprocessing;
 
 import java.util.Map;
 import java.util.Optional;
diff --git a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/JointFlatTransformer.java b/uc2-application/src/main/java/uc2/streamprocessing/JointFlatTransformer.java
similarity index 98%
rename from uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/JointFlatTransformer.java
rename to uc2-application/src/main/java/uc2/streamprocessing/JointFlatTransformer.java
index aa39a56322a74248d54177d30305c8ceec4b79d8..87a1d9967295995ce5dc46e0f1a9f5f52ffae469 100644
--- a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/JointFlatTransformer.java
+++ b/uc2-application/src/main/java/uc2/streamprocessing/JointFlatTransformer.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation.streamprocessing;
+package uc2.streamprocessing;
 
 import com.google.common.base.MoreObjects;
 import java.util.ArrayList;
diff --git a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/JointFlatTransformerFactory.java b/uc2-application/src/main/java/uc2/streamprocessing/JointFlatTransformerFactory.java
similarity index 96%
rename from uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/JointFlatTransformerFactory.java
rename to uc2-application/src/main/java/uc2/streamprocessing/JointFlatTransformerFactory.java
index adfca6cb67bfa26ccf1499b0f79b7e37ee1da5e4..5ddb07850e4c14418b9014c8a240c677cb548259 100644
--- a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/JointFlatTransformerFactory.java
+++ b/uc2-application/src/main/java/uc2/streamprocessing/JointFlatTransformerFactory.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation.streamprocessing;
+package uc2.streamprocessing;
 
 import java.util.Map;
 import java.util.Set;
diff --git a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/JointRecordParents.java b/uc2-application/src/main/java/uc2/streamprocessing/JointRecordParents.java
similarity index 92%
rename from uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/JointRecordParents.java
rename to uc2-application/src/main/java/uc2/streamprocessing/JointRecordParents.java
index 6bbf0aad6fd5976791e157957640c1c51c3bd259..74fb5441f9a716af4ddd279b4b5fff0466697a23 100644
--- a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/JointRecordParents.java
+++ b/uc2-application/src/main/java/uc2/streamprocessing/JointRecordParents.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation.streamprocessing;
+package uc2.streamprocessing;
 
 import java.util.Set;
 import titan.ccp.models.records.ActivePowerRecord;
diff --git a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/KafkaStreamsBuilder.java b/uc2-application/src/main/java/uc2/streamprocessing/KafkaStreamsBuilder.java
similarity index 99%
rename from uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/KafkaStreamsBuilder.java
rename to uc2-application/src/main/java/uc2/streamprocessing/KafkaStreamsBuilder.java
index 0ae0d399d232535da9c445ca0918869d2db1ad9e..eb0643d63f934e7966bca74a7ff7356b2aefb259 100644
--- a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/KafkaStreamsBuilder.java
+++ b/uc2-application/src/main/java/uc2/streamprocessing/KafkaStreamsBuilder.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation.streamprocessing;
+package uc2.streamprocessing;
 
 import java.time.Duration;
 import java.util.Objects;
diff --git a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/OptionalParentsSerde.java b/uc2-application/src/main/java/uc2/streamprocessing/OptionalParentsSerde.java
similarity index 96%
rename from uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/OptionalParentsSerde.java
rename to uc2-application/src/main/java/uc2/streamprocessing/OptionalParentsSerde.java
index a310b3a68462d73e305fbda179fb5e49eddf5d85..e4624d9531fc476d707d1b712dddb553a69b3823 100644
--- a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/OptionalParentsSerde.java
+++ b/uc2-application/src/main/java/uc2/streamprocessing/OptionalParentsSerde.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation.streamprocessing;
+package uc2.streamprocessing;
 
 import java.util.HashSet;
 import java.util.Optional;
diff --git a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/ParentsSerde.java b/uc2-application/src/main/java/uc2/streamprocessing/ParentsSerde.java
similarity index 95%
rename from uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/ParentsSerde.java
rename to uc2-application/src/main/java/uc2/streamprocessing/ParentsSerde.java
index 478b8bcf3ccf19b4ffc02ac1fb41e085c66c4db9..327f33a10b6450c6d16d155314bff76aa18913d9 100644
--- a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/ParentsSerde.java
+++ b/uc2-application/src/main/java/uc2/streamprocessing/ParentsSerde.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation.streamprocessing;
+package uc2.streamprocessing;
 
 import java.util.HashSet;
 import java.util.Set;
diff --git a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/RecordAggregator.java b/uc2-application/src/main/java/uc2/streamprocessing/RecordAggregator.java
similarity index 97%
rename from uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/RecordAggregator.java
rename to uc2-application/src/main/java/uc2/streamprocessing/RecordAggregator.java
index ae7a167c90862eb85a9f75b59fe110c4628ee8bc..0b3e23462ccd61bdd71b485de62c28e89168374a 100644
--- a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/RecordAggregator.java
+++ b/uc2-application/src/main/java/uc2/streamprocessing/RecordAggregator.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation.streamprocessing;
+package uc2.streamprocessing;
 
 import org.apache.kafka.streams.kstream.Windowed;
 import titan.ccp.models.records.ActivePowerRecord;
diff --git a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/SensorParentKey.java b/uc2-application/src/main/java/uc2/streamprocessing/SensorParentKey.java
similarity index 93%
rename from uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/SensorParentKey.java
rename to uc2-application/src/main/java/uc2/streamprocessing/SensorParentKey.java
index 32d77c402ece5b3bf289941a56c9c0f15b3b2576..4cb3bc9c6ec31a6ee086adffb4db188e348c040f 100644
--- a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/SensorParentKey.java
+++ b/uc2-application/src/main/java/uc2/streamprocessing/SensorParentKey.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation.streamprocessing;
+package uc2.streamprocessing;
 
 /**
  * A key consisting of the identifier of a sensor and an identifier of parent sensor.
diff --git a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/SensorParentKeySerde.java b/uc2-application/src/main/java/uc2/streamprocessing/SensorParentKeySerde.java
similarity index 95%
rename from uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/SensorParentKeySerde.java
rename to uc2-application/src/main/java/uc2/streamprocessing/SensorParentKeySerde.java
index 2646c7d04439565d7d2f43719bbdb8a1b66633cc..1a2688c2bac2dc3e69d786c6ff395106f0a0f58c 100644
--- a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/SensorParentKeySerde.java
+++ b/uc2-application/src/main/java/uc2/streamprocessing/SensorParentKeySerde.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation.streamprocessing;
+package uc2.streamprocessing;
 
 import org.apache.kafka.common.serialization.Serde;
 import titan.ccp.common.kafka.simpleserdes.BufferSerde;
diff --git a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/TopologyBuilder.java b/uc2-application/src/main/java/uc2/streamprocessing/TopologyBuilder.java
similarity index 99%
rename from uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/TopologyBuilder.java
rename to uc2-application/src/main/java/uc2/streamprocessing/TopologyBuilder.java
index 7a67c78372db2abb59af33d4b15109fe7e75c0c2..a6b377b0ead972c89c58d405279a571f545ae91b 100644
--- a/uc2-application/src/main/java/titan/ccp/aggregation/streamprocessing/TopologyBuilder.java
+++ b/uc2-application/src/main/java/uc2/streamprocessing/TopologyBuilder.java
@@ -1,4 +1,4 @@
-package titan.ccp.aggregation.streamprocessing;
+package uc2.streamprocessing;
 
 import com.google.common.math.StatsAccumulator;
 import java.time.Duration;
diff --git a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/OptionalParentsSerdeTest.java b/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/OptionalParentsSerdeTest.java
index 1bffdaa3ec264dbd871b08d5f47bee1ef40dae82..f92af2b5a908f8c4efb8ec02a00c62b9925cb41f 100644
--- a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/OptionalParentsSerdeTest.java
+++ b/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/OptionalParentsSerdeTest.java
@@ -3,7 +3,7 @@ package titan.ccp.aggregation.streamprocessing;
 import java.util.Optional;
 import java.util.Set;
 import org.junit.Test;
-import titan.ccp.aggregation.streamprocessing.OptionalParentsSerde;
+import uc2.streamprocessing.OptionalParentsSerde;
 
 public class OptionalParentsSerdeTest {
 
diff --git a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/ParentsSerdeTest.java b/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/ParentsSerdeTest.java
index 12301031c14002c22843e4178eacc2efafae9816..715a14f47ee1d8243070344ea40edba37ee595fd 100644
--- a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/ParentsSerdeTest.java
+++ b/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/ParentsSerdeTest.java
@@ -2,7 +2,7 @@ package titan.ccp.aggregation.streamprocessing;
 
 import java.util.Set;
 import org.junit.Test;
-import titan.ccp.aggregation.streamprocessing.ParentsSerde;
+import uc2.streamprocessing.ParentsSerde;
 
 public class ParentsSerdeTest {
 
diff --git a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/SensorParentKeySerdeTest.java b/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/SensorParentKeySerdeTest.java
index 51b2a052ebe080b505414fb5d514aebcaa3fba00..3090c9efb7e1fa846f5dc10fae0e917802853c39 100644
--- a/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/SensorParentKeySerdeTest.java
+++ b/uc2-application/src/test/java/titan/ccp/aggregation/streamprocessing/SensorParentKeySerdeTest.java
@@ -1,8 +1,8 @@
 package titan.ccp.aggregation.streamprocessing;
 
 import org.junit.Test;
-import titan.ccp.aggregation.streamprocessing.SensorParentKey;
-import titan.ccp.aggregation.streamprocessing.SensorParentKeySerde;
+import uc2.streamprocessing.SensorParentKey;
+import uc2.streamprocessing.SensorParentKeySerde;
 
 public class SensorParentKeySerdeTest {
 
diff --git a/uc2-workload-generator/Dockerfile b/uc2-workload-generator/Dockerfile
index 9b17de3af09feb1af8d14fd277d88c8c2797142e..162243e055732de84d1680dba609425f4068dbc2 100644
--- a/uc2-workload-generator/Dockerfile
+++ b/uc2-workload-generator/Dockerfile
@@ -1,6 +1,6 @@
 FROM openjdk:11-slim
 
-ADD build/distributions/exp-bigdata19-bridge.tar /
+ADD build/distributions/uc2-workload-generator.tar /
 
-CMD export JAVA_OPTS=-Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL \
-    && /exp-bigdata19-bridge/bin/exp-bigdata19-bridge
\ No newline at end of file
+CMD  JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
+     /uc2-workload-generator/bin/uc2-workload-generator
\ No newline at end of file
diff --git a/uc2-workload-generator/build.gradle b/uc2-workload-generator/build.gradle
index 12e597b37f775a7ad48a7e6009ed075213e1712b..833291011bb60bf84fcec323f2f0f63f9915d245 100644
--- a/uc2-workload-generator/build.gradle
+++ b/uc2-workload-generator/build.gradle
@@ -21,7 +21,7 @@ dependencies {
     testCompile 'junit:junit:4.12'
 }
 
-mainClassName = "titan.ccp.kiekerbridge.expbigdata19.ExperimentorBigData"
+mainClassName = "uc2.workloadGenerator.LoadGenerator"
 
 eclipse {
     classpath {
diff --git a/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java b/uc2-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
similarity index 98%
rename from uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java
rename to uc2-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
index b46128c8ebfd52aeeb7127777bb6530761f35181..6c67cf722b4dce87f0bc197ba80f8f117f82198e 100644
--- a/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java
+++ b/uc2-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
@@ -1,4 +1,4 @@
-package titan.ccp.kiekerbridge;
+package kafkaSender;
 
 import java.util.Properties;
 import java.util.function.Function;
diff --git a/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java b/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java
deleted file mode 100644
index a50bbd942fccf5f8899414fe8cb7b82ad6953f87..0000000000000000000000000000000000000000
--- a/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import java.io.IOException;
-import java.util.Objects;
-
-public class ExperimentorBigData {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String modus = Objects.requireNonNullElse(System.getenv("MODUS"), "LoadCounter");
-
-    if (modus.equals("LoadGenerator")) {
-      LoadGenerator.main(args);
-    } else if (modus.equals("LoadGeneratorExtrem")) {
-      LoadGeneratorExtrem.main(args);
-    } else if (modus.equals("LoadCounter")) {
-      LoadCounter.main(args);
-    }
-
-  }
-}
diff --git a/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java b/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java
deleted file mode 100644
index 798f3014446605afab2cf20f3232896baab02802..0000000000000000000000000000000000000000
--- a/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import com.google.common.math.StatsAccumulator;
-import java.time.Duration;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.serialization.Deserializer;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-import titan.ccp.models.records.AggregatedActivePowerRecord;
-import titan.ccp.models.records.AggregatedActivePowerRecordFactory;
-
-public class LoadCounter {
-
-  public static void main(final String[] args) throws InterruptedException {
-
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaOutputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_OUTPUT_TOPIC"), "output");
-
-    final Properties props = new Properties();
-    props.setProperty("bootstrap.servers", kafkaBootstrapServers);
-    props.setProperty("group.id", "load-counter");
-    props.setProperty("enable.auto.commit", "true");
-    props.setProperty("auto.commit.interval.ms", "1000");
-    props.setProperty("max.poll.records", "1000000");
-    props.setProperty("max.partition.fetch.bytes", "134217728"); // 128 MB
-    props.setProperty("key.deserializer",
-        "org.apache.kafka.common.serialization.StringDeserializer");
-    props.setProperty("value.deserializer",
-        "org.apache.kafka.common.serialization.ByteArrayDeserializer");
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
-    final Deserializer<AggregatedActivePowerRecord> deserializer =
-        IMonitoringRecordSerde.deserializer(new AggregatedActivePowerRecordFactory());
-
-    final KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props);
-    consumer.subscribe(List.of(kafkaInputTopic, kafkaOutputTopic));
-
-    executor.scheduleAtFixedRate(
-        () -> {
-          final long time = System.currentTimeMillis();
-          final ConsumerRecords<String, byte[]> records = consumer.poll(Duration.ofMillis(500));
-
-          long inputCount = 0;
-          for (final ConsumerRecord<String, byte[]> inputRecord : records
-              .records(kafkaInputTopic)) {
-            inputCount++;
-          }
-
-          long outputCount = 0;
-          final StatsAccumulator statsAccumulator = new StatsAccumulator();
-          for (final ConsumerRecord<String, byte[]> outputRecord : records
-              .records(kafkaOutputTopic)) {
-            outputCount++;
-            final AggregatedActivePowerRecord record =
-                deserializer.deserialize(kafkaOutputTopic, outputRecord.value());
-            final long latency = time - record.getTimestamp();
-            statsAccumulator.add(latency);
-          }
-
-          final double latency = statsAccumulator.count() > 0 ? statsAccumulator.mean() : 0.0;
-
-          final long elapsedTime = System.currentTimeMillis() - time;
-          System.out
-              .println("input," + time + ',' + elapsedTime + ',' + 0 + ',' + inputCount);
-          System.out
-              .println("output," + time + ',' + elapsedTime + ',' + latency + ',' + outputCount);
-        },
-        0,
-        1,
-        TimeUnit.SECONDS);
-  }
-
-}
diff --git a/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java b/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java
deleted file mode 100644
index 97a7c84f872f3ab676128d903ae121c376bf7608..0000000000000000000000000000000000000000
--- a/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java
+++ /dev/null
@@ -1,124 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.kiekerbridge.KafkaRecordSender;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-
-public class LoadGenerator {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
-    final int numNestedGroups =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
-    final int numSensor =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
-    final int periodMs =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-    final int value =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-    final boolean sendRegistry =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-    final int threads =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
-    final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
-    final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
-
-    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-    if (hierarchy.equals("deep")) {
-      MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-      for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-        lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-      }
-      for (int s = 0; s < numSensor; s++) {
-        lastSensor.addChildMachineSensor("sensor_" + s);
-      }
-    } else if (hierarchy.equals("full")) {
-      addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-    } else {
-      throw new IllegalStateException();
-    }
-
-    final List<String> sensors =
-        sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-            .collect(Collectors.toList());
-
-    if (sendRegistry) {
-      final ConfigPublisher configPublisher =
-          new ConfigPublisher(kafkaBootstrapServers, "configuration");
-      configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-      configPublisher.close();
-      System.out.println("Configuration sent.");
-
-      System.out.println("Now wait 30 seconds");
-      Thread.sleep(30_000);
-      System.out.println("And woke up again :)");
-    }
-
-
-    final Properties kafkaProperties = new Properties();
-    // kafkaProperties.put("acks", this.acknowledges);
-    kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
-    kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
-    kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(
-        kafkaBootstrapServers, kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(),
-        kafkaProperties);
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-    final Random random = new Random();
-
-    for (final String sensor : sensors) {
-      final int initialDelay = random.nextInt(periodMs);
-      executor.scheduleAtFixedRate(
-          () -> {
-            kafkaRecordSender.write(new ActivePowerRecord(
-                sensor,
-                System.currentTimeMillis(),
-                value));
-          },
-          initialDelay,
-          periodMs,
-          TimeUnit.MILLISECONDS);
-    }
-
-    System.out.println("Wait for termination...");
-    executor.awaitTermination(30, TimeUnit.DAYS);
-    System.out.println("Will terminate now");
-
-  }
-
-  private static int addChildren(final MutableAggregatedSensor parent, final int numChildren,
-      final int lvl, final int maxLvl, int nextId) {
-    for (int c = 0; c < numChildren; c++) {
-      if (lvl == maxLvl) {
-        parent.addChildMachineSensor("s_" + nextId);
-        nextId++;
-      } else {
-        final MutableAggregatedSensor newParent =
-            parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
-        nextId++;
-        nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
-      }
-    }
-    return nextId;
-  }
-
-}
diff --git a/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java b/uc2-workload-generator/src/main/java/uc2/workloadGenerator/ConfigPublisher.java
similarity index 97%
rename from uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java
rename to uc2-workload-generator/src/main/java/uc2/workloadGenerator/ConfigPublisher.java
index d5f55a4ab7ca265b241e880363975070e9952c45..56625e454b42b6620b21261e7a57969f83707dfe 100644
--- a/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java
+++ b/uc2-workload-generator/src/main/java/uc2/workloadGenerator/ConfigPublisher.java
@@ -1,4 +1,4 @@
-package titan.ccp.kiekerbridge.expbigdata19;
+package uc2.workloadGenerator;
 
 import java.util.Properties;
 import java.util.concurrent.ExecutionException;
diff --git a/uc2-workload-generator/src/main/java/uc2/workloadGenerator/LoadGenerator.java b/uc2-workload-generator/src/main/java/uc2/workloadGenerator/LoadGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..c818aadd5e8c61088297f200b134e93e5b765a06
--- /dev/null
+++ b/uc2-workload-generator/src/main/java/uc2/workloadGenerator/LoadGenerator.java
@@ -0,0 +1,107 @@
+package uc2.workloadGenerator;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import kafkaSender.KafkaRecordSender;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import titan.ccp.configuration.events.Event;
+import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
+import titan.ccp.model.sensorregistry.MutableSensorRegistry;
+import titan.ccp.models.records.ActivePowerRecord;
+
+public class LoadGenerator {
+
+	public static void main(final String[] args) throws InterruptedException, IOException {
+
+		final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
+		final int numNestedGroups = Integer
+				.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
+		final int numSensor = Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
+		final int periodMs = Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
+		final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+		final boolean sendRegistry = Boolean
+				.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
+		final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
+		final String kafkaBootstrapServers = Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
+				"localhost:9092");
+		final String kafkaInputTopic = Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
+		final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
+		final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
+		final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+
+		final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
+		if (hierarchy.equals("deep")) {
+			MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
+			for (int lvl = 1; lvl < numNestedGroups; lvl++) {
+				lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
+			}
+			for (int s = 0; s < numSensor; s++) {
+				lastSensor.addChildMachineSensor("sensor_" + s);
+			}
+		} else if (hierarchy.equals("full")) {
+			addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
+		} else {
+			throw new IllegalStateException("Unsupported hierarchy type: " + hierarchy);
+		}
+
+		final List<String> sensors = sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
+				.collect(Collectors.toList());
+
+		if (sendRegistry) {
+			final ConfigPublisher configPublisher = new ConfigPublisher(kafkaBootstrapServers, "configuration");
+			configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
+			configPublisher.close();
+			System.out.println("Configuration sent.");
+
+			System.out.println("Now wait 30 seconds");
+			Thread.sleep(30_000);
+			System.out.println("And woke up again :)");
+		}
+
+		final Properties kafkaProperties = new Properties();
+		// kafkaProperties.put("acks", this.acknowledges);
+		kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
+		kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
+		kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
+		final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(kafkaBootstrapServers,
+				kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
+
+		final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
+		final Random random = new Random();
+
+		for (final String sensor : sensors) {
+			final int initialDelay = random.nextInt(periodMs);
+			executor.scheduleAtFixedRate(() -> {
+				kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
+			}, initialDelay, periodMs, TimeUnit.MILLISECONDS);
+		}
+
+		System.out.println("Wait for termination...");
+		executor.awaitTermination(30, TimeUnit.DAYS);
+		System.out.println("Will terminate now");
+
+	}
+
+	private static int addChildren(final MutableAggregatedSensor parent, final int numChildren, final int lvl,
+			final int maxLvl, int nextId) {
+		for (int c = 0; c < numChildren; c++) {
+			if (lvl == maxLvl) {
+				parent.addChildMachineSensor("s_" + nextId);
+				nextId++;
+			} else {
+				final MutableAggregatedSensor newParent = parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
+				nextId++;
+				nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
+			}
+		}
+		return nextId;
+	}
+
+}
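
The two hierarchy modes differ sharply in how many machine sensors they produce: with HIERARCHY=deep the registry is a chain of NUM_NESTED_GROUPS groups with all NUM_SENSORS leaves under the last group, whereas with HIERARCHY=full the addChildren recursion builds a complete tree of depth NUM_NESTED_GROUPS and branching factor NUM_SENSORS. A small sketch of the resulting leaf counts; the closed-form formula is derived from the recursion above, not stated in the source:

public class HierarchySizeSketch {

	public static void main(final String[] args) {
		final int numNestedGroups = 3;
		final int numSensor = 4;

		// "deep": a chain of groups; all machine sensors hang off the last group.
		final int deepLeaves = numSensor;

		// "full": every group has numSensor children down to depth numNestedGroups,
		// so the leaf count grows exponentially: numSensor^numNestedGroups.
		final int fullLeaves = (int) Math.pow(numSensor, numNestedGroups);

		System.out.println("deep: " + deepLeaves + " machine sensors");
		System.out.println("full: " + fullLeaves + " machine sensors");
	}
}
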
diff --git a/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java b/uc2-workload-generator/src/main/java/uc2/workloadGenerator/LoadGeneratorExtrem.java
similarity index 98%
rename from uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java
rename to uc2-workload-generator/src/main/java/uc2/workloadGenerator/LoadGeneratorExtrem.java
index 5bfb6ad488e90f39ded2b9e4cb57d10099f1c538..e13030e23d9dd945553abd9f919d0873e4b23bda 100644
--- a/uc2-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java
+++ b/uc2-workload-generator/src/main/java/uc2/workloadGenerator/LoadGeneratorExtrem.java
@@ -1,4 +1,4 @@
-package titan.ccp.kiekerbridge.expbigdata19;
+package uc2.workloadGenerator;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -9,9 +9,9 @@ import java.util.Objects;
 import java.util.Properties;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
+import kafkaSender.KafkaRecordSender;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import titan.ccp.configuration.events.Event;
-import titan.ccp.kiekerbridge.KafkaRecordSender;
 import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
 import titan.ccp.model.sensorregistry.MutableSensorRegistry;
 import titan.ccp.model.sensorregistry.SensorRegistry;
diff --git a/uc2-workload-generator/src/main/resources/META-INF/application.properties b/uc2-workload-generator/src/main/resources/META-INF/application.properties
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/uc3-application/Dockerfile b/uc3-application/Dockerfile
index 9b17de3af09feb1af8d14fd277d88c8c2797142e..c70a24268e114e924b5f06dc7a8979100f5d8455 100644
--- a/uc3-application/Dockerfile
+++ b/uc3-application/Dockerfile
@@ -1,6 +1,8 @@
 FROM openjdk:11-slim
 
-ADD build/distributions/exp-bigdata19-bridge.tar /
 
-CMD export JAVA_OPTS=-Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL \
-    && /exp-bigdata19-bridge/bin/exp-bigdata19-bridge
\ No newline at end of file
+ADD build/distributions/uc3-application.tar /
+
+
+CMD  JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
+     /uc3-application/bin/uc3-application
\ No newline at end of file
diff --git a/uc3-application/build.gradle b/uc3-application/build.gradle
index 12e597b37f775a7ad48a7e6009ed075213e1712b..b0279201322e94d9bd9b14222a2dc218f18b4309 100644
--- a/uc3-application/build.gradle
+++ b/uc3-application/build.gradle
@@ -21,7 +21,7 @@ dependencies {
     testCompile 'junit:junit:4.12'
 }
 
-mainClassName = "titan.ccp.kiekerbridge.expbigdata19.ExperimentorBigData"
+mainClassName = "uc3.application.HistoryService"
 
 eclipse {
     classpath {
diff --git a/uc3-application/resources/META-INF/application.properties b/uc3-application/resources/META-INF/application.properties
new file mode 100644
index 0000000000000000000000000000000000000000..cff8630c825a9ba663271e3001238b8cfe0110a2
--- /dev/null
+++ b/uc3-application/resources/META-INF/application.properties
@@ -0,0 +1,7 @@
+kafka.bootstrap.servers=localhost:9092
+kafka.input.topic=input
+kafka.output.topic=output
+kafka.window.duration.minutes=60
+num.threads=1
+commit.interval.ms=10
+cache.max.bytes.buffering=-1
diff --git a/uc3-application/src/main/java/uc3/application/ConfigurationKeys.java b/uc3-application/src/main/java/uc3/application/ConfigurationKeys.java
new file mode 100644
index 0000000000000000000000000000000000000000..8849279792e5192c003fa6d82257e3a162cbaac0
--- /dev/null
+++ b/uc3-application/src/main/java/uc3/application/ConfigurationKeys.java
@@ -0,0 +1,25 @@
+package uc3.application;
+
+/**
+ * Keys to access configuration parameters.
+ */
+public final class ConfigurationKeys {
+
+	public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
+
+	public static final String KAFKA_OUTPUT_TOPIC = "kafka.output.topic";
+
+	public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
+
+	public static final String NUM_THREADS = "num.threads";
+
+	public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
+
+	public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
+
+	public static final String KAFKA_WINDOW_DURATION_MINUTES = "kafka.window.duration.minutes";
+
+	private ConfigurationKeys() {
+	}
+
+}
diff --git a/uc3-application/src/main/java/uc3/application/HistoryService.java b/uc3-application/src/main/java/uc3/application/HistoryService.java
new file mode 100644
index 0000000000000000000000000000000000000000..26c9584d26cf0313504f093474c1421e5a07b5e8
--- /dev/null
+++ b/uc3-application/src/main/java/uc3/application/HistoryService.java
@@ -0,0 +1,55 @@
+package uc3.application;
+
+import java.time.Duration;
+import java.util.Objects;
+import java.util.concurrent.CompletableFuture;
+import org.apache.commons.configuration2.Configuration;
+import org.apache.kafka.streams.KafkaStreams;
+import titan.ccp.common.configuration.Configurations;
+import uc3.streamprocessing.KafkaStreamsBuilder;
+
+/**
+ * A microservice that manages the history and, therefore, stores and aggregates
+ * incoming measurements.
+ *
+ */
+public class HistoryService {
+
+	private final Configuration config = Configurations.create();
+
+	private final CompletableFuture<Void> stopEvent = new CompletableFuture<>();
+	private final int windowDurationMinutes = Integer
+			.parseInt(Objects.requireNonNullElse(System.getenv("KAFKA_WINDOW_DURATION_MINUTES"), "60"));
+
+	/**
+	 * Start the service.
+	 */
+	public void run() {
+		this.createKafkaStreamsApplication();
+	}
+
+	/**
+	 * Build and start the underlying Kafka Streams application of the service.
+	 *
+	 */
+	private void createKafkaStreamsApplication() {
+		final KafkaStreams kafkaStreams = new KafkaStreamsBuilder()
+				.bootstrapServers(this.config.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS))
+				.inputTopic(this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC))
+				.outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC))
+				.windowDuration(Duration.ofMinutes(this.windowDurationMinutes))
+				.numThreads(this.config.getInt(ConfigurationKeys.NUM_THREADS))
+				.commitIntervalMs(this.config.getInt(ConfigurationKeys.COMMIT_INTERVAL_MS))
+				.cacheMaxBytesBuffering(this.config.getInt(ConfigurationKeys.CACHE_MAX_BYTES_BUFFERING)).build();
+		this.stopEvent.thenRun(kafkaStreams::close);
+		kafkaStreams.start();
+	}
+
+	public static void main(final String[] args) {
+		new HistoryService().run();
+	}
+
+}
diff --git a/uc3-application/src/main/java/uc3/streamprocessing/KafkaStreamsBuilder.java b/uc3-application/src/main/java/uc3/streamprocessing/KafkaStreamsBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..5106ed2ad4fb01fc38143151bbd752f2b98b160d
--- /dev/null
+++ b/uc3-application/src/main/java/uc3/streamprocessing/KafkaStreamsBuilder.java
@@ -0,0 +1,106 @@
+package uc3.streamprocessing;
+
+import java.time.Duration;
+import java.util.Objects;
+import java.util.Properties;
+import org.apache.kafka.streams.KafkaStreams;
+import org.apache.kafka.streams.StreamsConfig;
+import titan.ccp.common.kafka.streams.PropertiesBuilder;
+
+/**
+ * Builder for the Kafka Streams configuration.
+ */
+public class KafkaStreamsBuilder {
+
+	private static final String APPLICATION_NAME = "titan-ccp-history";
+	private static final String APPLICATION_VERSION = "0.0.1";
+
+	// private static final Logger LOGGER =
+	// LoggerFactory.getLogger(KafkaStreamsBuilder.class);
+
+	private String bootstrapServers; // NOPMD
+	private String inputTopic; // NOPMD
+	private String outputTopic; // NOPMD
+	private Duration windowDuration; // NOPMD
+	private int numThreads = -1; // NOPMD
+	private int commitIntervalMs = -1; // NOPMD
+	private int cacheMaxBytesBuff = -1; // NOPMD
+
+	public KafkaStreamsBuilder inputTopic(final String inputTopic) {
+		this.inputTopic = inputTopic;
+		return this;
+	}
+
+	public KafkaStreamsBuilder bootstrapServers(final String bootstrapServers) {
+		this.bootstrapServers = bootstrapServers;
+		return this;
+	}
+
+	public KafkaStreamsBuilder outputTopic(final String outputTopic) {
+		this.outputTopic = outputTopic;
+		return this;
+	}
+
+	public KafkaStreamsBuilder windowDuration(final Duration windowDuration) {
+		this.windowDuration = windowDuration;
+		return this;
+	}
+
+	/**
+	 * Sets the Kafka Streams property for the number of threads
+	 * (num.stream.threads). Can be minus one for using the default.
+	 */
+	public KafkaStreamsBuilder numThreads(final int numThreads) {
+		if (numThreads < -1 || numThreads == 0) {
+			throw new IllegalArgumentException("Number of threads must be greater 0 or -1.");
+		}
+		this.numThreads = numThreads;
+		return this;
+	}
+
+	/**
+	 * Sets the Kafka Streams property for the frequency with which to save the
+	 * position (offsets in source topics) of tasks (commit.interval.ms). Must be
+	 * zero for processing all records, for example, when processing records in
+	 * bulk. Can be minus one for using the default.
+	 */
+	public KafkaStreamsBuilder commitIntervalMs(final int commitIntervalMs) {
+		if (commitIntervalMs < -1) {
+			throw new IllegalArgumentException("Commit interval must be greater or equal -1.");
+		}
+		this.commitIntervalMs = commitIntervalMs;
+		return this;
+	}
+
+	/**
+	 * Sets the Kafka Streams property for maximum number of memory bytes to be used
+	 * for record caches across all threads (cache.max.bytes.buffering). Must be
+	 * zero for processing all records, for example, when processing records in
+	 * bulk. Can be minus one for using the default.
+	 */
+	public KafkaStreamsBuilder cacheMaxBytesBuffering(final int cacheMaxBytesBuffering) {
+		if (cacheMaxBytesBuffering < -1) {
+			throw new IllegalArgumentException("Cache max bytes buffering must be greater or equal -1.");
+		}
+		this.cacheMaxBytesBuff = cacheMaxBytesBuffering;
+		return this;
+	}
+
+	/**
+	 * Builds the {@link KafkaStreams} instance.
+	 */
+	public KafkaStreams build() {
+		Objects.requireNonNull(this.inputTopic, "Input topic has not been set.");
+		// TODO log parameters
+		final TopologyBuilder topologyBuilder = new TopologyBuilder(this.inputTopic, this.outputTopic,
+				this.windowDuration);
+		final Properties properties = PropertiesBuilder.bootstrapServers(this.bootstrapServers)
+				.applicationId(APPLICATION_NAME + '-' + APPLICATION_VERSION) // TODO as parameter
+				.set(StreamsConfig.NUM_STREAM_THREADS_CONFIG, this.numThreads, p -> p > 0)
+				.set(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, this.commitIntervalMs, p -> p >= 0)
+				.set(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, this.cacheMaxBytesBuff, p -> p >= 0)
+				.set(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, "DEBUG").build();
+		return new KafkaStreams(topologyBuilder.build(), properties);
+	}
+
+}
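
A minimal usage sketch of the builder above, filled with the defaults from uc3-application's application.properties; the shutdown hook is an assumption for the sketch, the actual HistoryService closes the streams via its stopEvent future instead:

import java.time.Duration;
import org.apache.kafka.streams.KafkaStreams;
import uc3.streamprocessing.KafkaStreamsBuilder;

public class BuilderUsageSketch {

	public static void main(final String[] args) {
		final KafkaStreams streams = new KafkaStreamsBuilder()
				.bootstrapServers("localhost:9092")
				.inputTopic("input")
				.outputTopic("output")
				.windowDuration(Duration.ofMinutes(60))
				.numThreads(-1) // -1 keeps the Kafka Streams default
				.commitIntervalMs(10)
				.cacheMaxBytesBuffering(-1) // -1 keeps the default cache size
				.build();
		streams.start();
		Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
	}
}
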
diff --git a/uc3-application/src/main/java/uc3/streamprocessing/TopologyBuilder.java b/uc3-application/src/main/java/uc3/streamprocessing/TopologyBuilder.java
new file mode 100644
index 0000000000000000000000000000000000000000..608c00940d9d5d8f2a210efdb98c54e385132405
--- /dev/null
+++ b/uc3-application/src/main/java/uc3/streamprocessing/TopologyBuilder.java
@@ -0,0 +1,54 @@
+package uc3.streamprocessing;
+
+import com.google.gson.Gson;
+import java.time.Duration;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.streams.StreamsBuilder;
+import org.apache.kafka.streams.Topology;
+import org.apache.kafka.streams.kstream.Consumed;
+import org.apache.kafka.streams.kstream.Materialized;
+import org.apache.kafka.streams.kstream.TimeWindows;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
+import titan.ccp.models.records.ActivePowerRecordFactory;
+
+/**
+ * Builds the Kafka Streams topology for the History microservice.
+ */
+public class TopologyBuilder {
+
+	private static final Logger LOGGER = LoggerFactory.getLogger(TopologyBuilder.class);
+
+	private final String inputTopic;
+	private final String outputTopic;
+	private final Duration duration;
+	private final Gson gson;
+
+	private final StreamsBuilder builder = new StreamsBuilder();
+
+	/**
+	 * Create a new {@link TopologyBuilder} using the given topics and window duration.
+	 */
+	public TopologyBuilder(final String inputTopic, final String outputTopic, final Duration duration) {
+		this.inputTopic = inputTopic;
+		this.outputTopic = outputTopic;
+		this.duration = duration;
+		this.gson = new Gson();
+	}
+
+	/**
+	 * Build the {@link Topology} for the History microservice.
+	 */
+	public Topology build() {
+		this.builder
+				.stream(this.inputTopic,
+						Consumed.with(Serdes.String(), IMonitoringRecordSerde.serde(new ActivePowerRecordFactory())))
+				.groupByKey().windowedBy(TimeWindows.of(this.duration))
+				.aggregate(() -> 0.0, (key, activePowerRecord, agg) -> agg + activePowerRecord.getValueInW(),
+						Materialized.with(Serdes.String(), Serdes.Double()))
+				.toStream().peek((k, v) -> System.out.printf("key %s, value %f%n", k, v)).to(this.outputTopic);
+
+		return this.builder.build();
+	}
+}
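
The topology sums ActivePowerRecord values per sensor within tumbling windows of the configured duration. A plain-Java sketch of the same semantics, assuming epoch-aligned event-time windows (the default for TimeWindows.of); the Reading class is a hypothetical stand-in for ActivePowerRecord:

import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class TumblingWindowSumSketch {

	static final class Reading {
		final String sensor;
		final long timestampMs;
		final double valueInW;

		Reading(final String sensor, final long timestampMs, final double valueInW) {
			this.sensor = sensor;
			this.timestampMs = timestampMs;
			this.valueInW = valueInW;
		}
	}

	public static void main(final String[] args) {
		final long windowMs = 60 * 60 * 1000L; // 60 minutes, the configured default
		final List<Reading> readings = List.of(
				new Reading("sensor_0", 0L, 10.0),
				new Reading("sensor_0", 30L * 60 * 1000, 10.0), // same window as the first
				new Reading("sensor_0", 61L * 60 * 1000, 10.0)); // falls into the next window

		// (sensor, window start) -> running sum, mirroring
		// groupByKey().windowedBy(TimeWindows.of(duration)).aggregate(...).
		final Map<String, Double> sums = new TreeMap<>();
		for (final Reading r : readings) {
			final long windowStart = r.timestampMs / windowMs * windowMs;
			sums.merge(r.sensor + "@" + windowStart, r.valueInW, Double::sum);
		}
		sums.forEach((k, v) -> System.out.printf(java.util.Locale.US, "key %s, value %f%n", k, v));
	}
}
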
diff --git a/uc3-application/src/main/resources/META-INF/application.properties b/uc3-application/src/main/resources/META-INF/application.properties
new file mode 100644
index 0000000000000000000000000000000000000000..d2002fd1c8841368d47017b2ce7939bfc42877aa
--- /dev/null
+++ b/uc3-application/src/main/resources/META-INF/application.properties
@@ -0,0 +1,6 @@
+kafka.bootstrap.servers=localhost:9092
+kafka.input.topic=input
+kafka.output.topic=output
+num.threads=1
+commit.interval.ms=10
+cache.max.bytes.buffering=-1
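
Editor's note: these defaults presumably back the corresponding environment variables at startup. A hypothetical sketch of such a lookup follows; this helper and its key-mapping convention are assumptions, not the repository's actual configuration code.

    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Properties;

    // Hypothetical helper: loads the packaged defaults and lets environment
    // variables such as KAFKA_BOOTSTRAP_SERVERS override them.
    public final class ConfigLoader {

        public static String get(final String propertyKey) throws IOException {
            final Properties defaults = new Properties();
            try (InputStream in = ConfigLoader.class.getClassLoader()
                    .getResourceAsStream("META-INF/application.properties")) {
                if (in == null) {
                    throw new IOException("application.properties not found on classpath");
                }
                defaults.load(in);
            }
            // Assumed convention: kafka.bootstrap.servers -> KAFKA_BOOTSTRAP_SERVERS
            final String envKey = propertyKey.toUpperCase().replace('.', '_');
            final String envValue = System.getenv(envKey);
            return envValue != null ? envValue : defaults.getProperty(propertyKey);
        }
    }
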
diff --git a/uc3-workload-generator/Dockerfile b/uc3-workload-generator/Dockerfile
index 9b17de3af09feb1af8d14fd277d88c8c2797142e..6efd5ec6163815c467ef22e18f3d2cc1e0e3259a 100644
--- a/uc3-workload-generator/Dockerfile
+++ b/uc3-workload-generator/Dockerfile
@@ -1,6 +1,6 @@
-FROM openjdk:11-slim
-
-ADD build/distributions/exp-bigdata19-bridge.tar /
-
-CMD export JAVA_OPTS=-Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL \
-    && /exp-bigdata19-bridge/bin/exp-bigdata19-bridge
\ No newline at end of file
+FROM openjdk:11-slim
+
+ADD build/distributions/uc3-workload-generator.tar /
+
+CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \
+     /uc3-workload-generator/bin/uc3-workload-generator
\ No newline at end of file
diff --git a/uc3-workload-generator/build.gradle b/uc3-workload-generator/build.gradle
index 12e597b37f775a7ad48a7e6009ed075213e1712b..5cb1fdfccc5677b64447b3e644e7fca47c2cd571 100644
--- a/uc3-workload-generator/build.gradle
+++ b/uc3-workload-generator/build.gradle
@@ -21,7 +21,7 @@ dependencies {
     testCompile 'junit:junit:4.12'
 }
 
-mainClassName = "titan.ccp.kiekerbridge.expbigdata19.ExperimentorBigData"
+mainClassName = "uc3.workloadGenerator.LoadGenerator"
 
 eclipse {
     classpath {
diff --git a/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java b/uc3-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
similarity index 98%
rename from uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java
rename to uc3-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
index b46128c8ebfd52aeeb7127777bb6530761f35181..6c67cf722b4dce87f0bc197ba80f8f117f82198e 100644
--- a/uc1-workload-generator/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java
+++ b/uc3-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
@@ -1,4 +1,4 @@
-package titan.ccp.kiekerbridge;
+package kafkaSender;
 
 import java.util.Properties;
 import java.util.function.Function;
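
Editor's note: the renamed KafkaRecordSender (its full body appears below, where the old uc4 copy is deleted) wraps a KafkaProducer with key and timestamp accessors. A brief usage sketch; the bootstrap server, topic, and sample values are illustrative.

    import kafkaSender.KafkaRecordSender;
    import titan.ccp.models.records.ActivePowerRecord;

    public class SenderExample {

        public static void main(final String[] args) {
            // The accessors tell the sender which key and event timestamp to
            // attach to every outgoing ProducerRecord.
            final KafkaRecordSender<ActivePowerRecord> sender = new KafkaRecordSender<>(
                    "localhost:9092", "input", r -> r.getIdentifier(), r -> r.getTimestamp());
            sender.write(new ActivePowerRecord("sensor_0", System.currentTimeMillis(), 10.0));
            sender.terminate(); // closes the underlying producer
        }
    }
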
diff --git a/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java b/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java
deleted file mode 100644
index a50bbd942fccf5f8899414fe8cb7b82ad6953f87..0000000000000000000000000000000000000000
--- a/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import java.io.IOException;
-import java.util.Objects;
-
-public class ExperimentorBigData {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String modus = Objects.requireNonNullElse(System.getenv("MODUS"), "LoadCounter");
-
-    if (modus.equals("LoadGenerator")) {
-      LoadGenerator.main(args);
-    } else if (modus.equals("LoadGeneratorExtrem")) {
-      LoadGeneratorExtrem.main(args);
-    } else if (modus.equals("LoadCounter")) {
-      LoadCounter.main(args);
-    }
-
-  }
-}
diff --git a/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java b/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java
deleted file mode 100644
index 798f3014446605afab2cf20f3232896baab02802..0000000000000000000000000000000000000000
--- a/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import com.google.common.math.StatsAccumulator;
-import java.time.Duration;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.serialization.Deserializer;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-import titan.ccp.models.records.AggregatedActivePowerRecord;
-import titan.ccp.models.records.AggregatedActivePowerRecordFactory;
-
-public class LoadCounter {
-
-  public static void main(final String[] args) throws InterruptedException {
-
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaOutputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_OUTPUT_TOPIC"), "output");
-
-    final Properties props = new Properties();
-    props.setProperty("bootstrap.servers", kafkaBootstrapServers);
-    props.setProperty("group.id", "load-counter");
-    props.setProperty("enable.auto.commit", "true");
-    props.setProperty("auto.commit.interval.ms", "1000");
-    props.setProperty("max.poll.records", "1000000");
-    props.setProperty("max.partition.fetch.bytes", "134217728"); // 128 MB
-    props.setProperty("key.deserializer",
-        "org.apache.kafka.common.serialization.StringDeserializer");
-    props.setProperty("value.deserializer",
-        "org.apache.kafka.common.serialization.ByteArrayDeserializer");
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
-    final Deserializer<AggregatedActivePowerRecord> deserializer =
-        IMonitoringRecordSerde.deserializer(new AggregatedActivePowerRecordFactory());
-
-    final KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props);
-    consumer.subscribe(List.of(kafkaInputTopic, kafkaOutputTopic));
-
-    executor.scheduleAtFixedRate(
-        () -> {
-          final long time = System.currentTimeMillis();
-          final ConsumerRecords<String, byte[]> records = consumer.poll(Duration.ofMillis(500));
-
-          long inputCount = 0;
-          for (final ConsumerRecord<String, byte[]> inputRecord : records
-              .records(kafkaInputTopic)) {
-            inputCount++;
-          }
-
-          long outputCount = 0;
-          final StatsAccumulator statsAccumulator = new StatsAccumulator();
-          for (final ConsumerRecord<String, byte[]> outputRecord : records
-              .records(kafkaOutputTopic)) {
-            outputCount++;
-            final AggregatedActivePowerRecord record =
-                deserializer.deserialize(kafkaOutputTopic, outputRecord.value());
-            final long latency = time - record.getTimestamp();
-            statsAccumulator.add(latency);
-          }
-
-          final double latency = statsAccumulator.count() > 0 ? statsAccumulator.mean() : 0.0;
-
-          final long elapsedTime = System.currentTimeMillis() - time;
-          System.out
-              .println("input," + time + ',' + elapsedTime + ',' + 0 + ',' + inputCount);
-          System.out
-              .println("output," + time + ',' + elapsedTime + ',' + latency + ',' + outputCount);
-        },
-        0,
-        1,
-        TimeUnit.SECONDS);
-  }
-
-}
diff --git a/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java b/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java
deleted file mode 100644
index 97a7c84f872f3ab676128d903ae121c376bf7608..0000000000000000000000000000000000000000
--- a/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java
+++ /dev/null
@@ -1,124 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.kiekerbridge.KafkaRecordSender;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-
-public class LoadGenerator {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
-    final int numNestedGroups =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
-    final int numSensor =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
-    final int periodMs =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-    final int value =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-    final boolean sendRegistry =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-    final int threads =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
-    final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
-    final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
-
-    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-    if (hierarchy.equals("deep")) {
-      MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-      for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-        lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-      }
-      for (int s = 0; s < numSensor; s++) {
-        lastSensor.addChildMachineSensor("sensor_" + s);
-      }
-    } else if (hierarchy.equals("full")) {
-      addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-    } else {
-      throw new IllegalStateException();
-    }
-
-    final List<String> sensors =
-        sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-            .collect(Collectors.toList());
-
-    if (sendRegistry) {
-      final ConfigPublisher configPublisher =
-          new ConfigPublisher(kafkaBootstrapServers, "configuration");
-      configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-      configPublisher.close();
-      System.out.println("Configuration sent.");
-
-      System.out.println("Now wait 30 seconds");
-      Thread.sleep(30_000);
-      System.out.println("And woke up again :)");
-    }
-
-
-    final Properties kafkaProperties = new Properties();
-    // kafkaProperties.put("acks", this.acknowledges);
-    kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
-    kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
-    kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(
-        kafkaBootstrapServers, kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(),
-        kafkaProperties);
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-    final Random random = new Random();
-
-    for (final String sensor : sensors) {
-      final int initialDelay = random.nextInt(periodMs);
-      executor.scheduleAtFixedRate(
-          () -> {
-            kafkaRecordSender.write(new ActivePowerRecord(
-                sensor,
-                System.currentTimeMillis(),
-                value));
-          },
-          initialDelay,
-          periodMs,
-          TimeUnit.MILLISECONDS);
-    }
-
-    System.out.println("Wait for termination...");
-    executor.awaitTermination(30, TimeUnit.DAYS);
-    System.out.println("Will terminate now");
-
-  }
-
-  private static int addChildren(final MutableAggregatedSensor parent, final int numChildren,
-      final int lvl, final int maxLvl, int nextId) {
-    for (int c = 0; c < numChildren; c++) {
-      if (lvl == maxLvl) {
-        parent.addChildMachineSensor("s_" + nextId);
-        nextId++;
-      } else {
-        final MutableAggregatedSensor newParent =
-            parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
-        nextId++;
-        nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
-      }
-    }
-    return nextId;
-  }
-
-}
diff --git a/uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java b/uc3-workload-generator/src/main/java/uc3/workloadGenerator/ConfigPublisher.java
similarity index 97%
rename from uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java
rename to uc3-workload-generator/src/main/java/uc3/workloadGenerator/ConfigPublisher.java
index d5f55a4ab7ca265b241e880363975070e9952c45..ab36397d810c276cf6e1e134364650a64d5997d1 100644
--- a/uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java
+++ b/uc3-workload-generator/src/main/java/uc3/workloadGenerator/ConfigPublisher.java
@@ -1,4 +1,4 @@
-package titan.ccp.kiekerbridge.expbigdata19;
+package uc3.workloadGenerator;
 
 import java.util.Properties;
 import java.util.concurrent.ExecutionException;
diff --git a/uc3-workload-generator/src/main/java/uc3/workloadGenerator/LoadGenerator.java b/uc3-workload-generator/src/main/java/uc3/workloadGenerator/LoadGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..35defc90a06f8c6a834c54fdd69388106b5c3ceb
--- /dev/null
+++ b/uc3-workload-generator/src/main/java/uc3/workloadGenerator/LoadGenerator.java
@@ -0,0 +1,87 @@
+package uc3.workloadGenerator;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import kafkaSender.KafkaRecordSender;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import titan.ccp.configuration.events.Event;
+import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
+import titan.ccp.model.sensorregistry.MutableSensorRegistry;
+import titan.ccp.models.records.ActivePowerRecord;
+
+public class LoadGenerator {
+
+	public static void main(final String[] args) throws InterruptedException, IOException {
+		// Configuration (adapted from the uc1 generator), read from environment variables.
+
+		final int numSensor = Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10"));
+		final int periodMs = Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
+		final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+		final boolean sendRegistry = Boolean
+				.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
+		final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
+		final String kafkaBootstrapServers = Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
+				"localhost:9092");
+		final String kafkaInputTopic = Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
+		final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
+		final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
+		final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+
+		// Create a flat sensor registry with numSensor machine sensors.
+		final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
+		addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 0);
+
+		final List<String> sensors = sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
+				.collect(Collectors.toList());
+
+		if (sendRegistry) {
+			final ConfigPublisher configPublisher = new ConfigPublisher(kafkaBootstrapServers, "configuration");
+			configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
+			configPublisher.close();
+			System.out.println("Configuration sent.");
+
+			System.out.println("Now wait 30 seconds");
+			Thread.sleep(30_000);
+			System.out.println("And woke up again :)");
+		}
+
+		final Properties kafkaProperties = new Properties();
+		// kafkaProperties.put("acks", this.acknowledges);
+		kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
+		kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
+		kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
+		final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(kafkaBootstrapServers,
+				kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
+
+		final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
+		final Random random = new Random();
+
+		for (final String sensor : sensors) {
+			System.out.println("Scheduling records for sensor " + sensor);
+			final int initialDelay = random.nextInt(periodMs);
+			executor.scheduleAtFixedRate(() -> {
+				kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
+			}, initialDelay, periodMs, TimeUnit.MILLISECONDS);
+		}
+
+		System.out.println("Wait for termination...");
+		executor.awaitTermination(30, TimeUnit.DAYS);
+		System.out.println("Will terminate now");
+
+	}
+
+	private static void addChildren(final MutableAggregatedSensor parent, final int numChildren, int nextId) {
+		for (int c = 0; c < numChildren; c++) {
+			parent.addChildMachineSensor("s_" + nextId);
+			nextId++;
+		}
+	}
+
+}
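
Editor's note: each sensor schedules exactly one record every PERIOD_MS milliseconds (with a random initial phase to spread the load), so the steady-state rate is NUM_SENSORS * 1000 / PERIOD_MS records per second, independent of THREADS, which only sizes the scheduler pool. A small sanity-check sketch using the defaults above:

    public class ThroughputEstimate {

        public static void main(final String[] args) {
            final int numSensors = 10; // NUM_SENSORS default above
            final int periodMs = 1000; // PERIOD_MS default above
            // One record per sensor per period.
            final double recordsPerSecond = numSensors * (1000.0 / periodMs);
            System.out.printf("Expected load: %.1f records/s%n", recordsPerSecond);
        }
    }
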
diff --git a/uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java b/uc3-workload-generator/src/main/java/uc3/workloadGenerator/LoadGeneratorExtrem.java
similarity index 98%
rename from uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java
rename to uc3-workload-generator/src/main/java/uc3/workloadGenerator/LoadGeneratorExtrem.java
index 5bfb6ad488e90f39ded2b9e4cb57d10099f1c538..2361cf2c04a1bc3bd05af089e6bdf72213eb6cb1 100644
--- a/uc1-application/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java
+++ b/uc3-workload-generator/src/main/java/uc3/workloadGenerator/LoadGeneratorExtrem.java
@@ -1,4 +1,4 @@
-package titan.ccp.kiekerbridge.expbigdata19;
+package uc3.workloadGenerator;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -9,9 +9,9 @@ import java.util.Objects;
 import java.util.Properties;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
+import kafkaSender.KafkaRecordSender;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import titan.ccp.configuration.events.Event;
-import titan.ccp.kiekerbridge.KafkaRecordSender;
 import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
 import titan.ccp.model.sensorregistry.MutableSensorRegistry;
 import titan.ccp.model.sensorregistry.SensorRegistry;
diff --git a/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java b/uc4-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
similarity index 98%
rename from uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java
rename to uc4-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
index b46128c8ebfd52aeeb7127777bb6530761f35181..6c67cf722b4dce87f0bc197ba80f8f117f82198e 100644
--- a/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java
+++ b/uc4-workload-generator/src/main/java/kafkaSender/KafkaRecordSender.java
@@ -1,4 +1,4 @@
-package titan.ccp.kiekerbridge;
+package kafkaSender;
 
 import java.util.Properties;
 import java.util.function.Function;
diff --git a/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java b/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java
deleted file mode 100644
index b46128c8ebfd52aeeb7127777bb6530761f35181..0000000000000000000000000000000000000000
--- a/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/KafkaRecordSender.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package titan.ccp.kiekerbridge;
-
-import java.util.Properties;
-import java.util.function.Function;
-import kieker.common.record.IMonitoringRecord;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-
-
-/**
- * Sends monitoring records to Kafka.
- *
- * @param <T> {@link IMonitoringRecord} to send
- */
-public class KafkaRecordSender<T extends IMonitoringRecord> {
-
-  private static final Logger LOGGER = LoggerFactory.getLogger(KafkaRecordSender.class);
-
-  private final String topic;
-
-  private final Function<T, String> keyAccessor;
-
-  private final Function<T, Long> timestampAccessor;
-
-  private final Producer<String, T> producer;
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic) {
-    this(bootstrapServers, topic, x -> "", x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor) {
-    this(bootstrapServers, topic, keyAccessor, x -> null, new Properties());
-  }
-
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor) {
-    this(bootstrapServers, topic, keyAccessor, timestampAccessor, new Properties());
-  }
-
-  /**
-   * Create a new {@link KafkaRecordSender}.
-   */
-  public KafkaRecordSender(final String bootstrapServers, final String topic,
-      final Function<T, String> keyAccessor, final Function<T, Long> timestampAccessor,
-      final Properties defaultProperties) {
-    this.topic = topic;
-    this.keyAccessor = keyAccessor;
-    this.timestampAccessor = timestampAccessor;
-
-    final Properties properties = new Properties();
-    properties.putAll(defaultProperties);
-    properties.put("bootstrap.servers", bootstrapServers);
-    // properties.put("acks", this.acknowledges);
-    // properties.put("batch.size", this.batchSize);
-    // properties.put("linger.ms", this.lingerMs);
-    // properties.put("buffer.memory", this.bufferMemory);
-
-    this.producer = new KafkaProducer<>(properties, new StringSerializer(),
-        IMonitoringRecordSerde.serializer());
-  }
-
-  /**
-   * Write the passed monitoring record to Kafka.
-   */
-  public void write(final T monitoringRecord) {
-    final ProducerRecord<String, T> record =
-        new ProducerRecord<>(this.topic, null, this.timestampAccessor.apply(monitoringRecord),
-            this.keyAccessor.apply(monitoringRecord), monitoringRecord);
-
-    LOGGER.debug("Send record to Kafka topic {}: {}", this.topic, record);
-    this.producer.send(record);
-  }
-
-  public void terminate() {
-    this.producer.close();
-  }
-
-}
diff --git a/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java b/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java
deleted file mode 100644
index d5f55a4ab7ca265b241e880363975070e9952c45..0000000000000000000000000000000000000000
--- a/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java
+++ /dev/null
@@ -1,50 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import java.util.Properties;
-import java.util.concurrent.ExecutionException;
-import org.apache.kafka.clients.producer.KafkaProducer;
-import org.apache.kafka.clients.producer.Producer;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import org.apache.kafka.clients.producer.ProducerRecord;
-import org.apache.kafka.common.serialization.StringSerializer;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.configuration.events.EventSerde;
-
-public class ConfigPublisher {
-
-  private final String topic;
-
-  private final Producer<Event, String> producer;
-
-  public ConfigPublisher(final String bootstrapServers, final String topic) {
-    this(bootstrapServers, topic, new Properties());
-  }
-
-  public ConfigPublisher(final String bootstrapServers, final String topic,
-      final Properties defaultProperties) {
-    this.topic = topic;
-
-    final Properties properties = new Properties();
-    properties.putAll(defaultProperties);
-    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-    properties.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, "134217728"); // 128 MB
-    properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, "134217728"); // 128 MB
-
-    this.producer =
-        new KafkaProducer<>(properties, EventSerde.serializer(), new StringSerializer());
-  }
-
-  public void publish(final Event event, final String value) {
-    final ProducerRecord<Event, String> record = new ProducerRecord<>(this.topic, event, value);
-    try {
-      this.producer.send(record).get();
-    } catch (InterruptedException | ExecutionException e) {
-      throw new IllegalArgumentException(e);
-    }
-  }
-
-  public void close() {
-    this.producer.close();
-  }
-
-}
diff --git a/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java b/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java
deleted file mode 100644
index a50bbd942fccf5f8899414fe8cb7b82ad6953f87..0000000000000000000000000000000000000000
--- a/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ExperimentorBigData.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import java.io.IOException;
-import java.util.Objects;
-
-public class ExperimentorBigData {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String modus = Objects.requireNonNullElse(System.getenv("MODUS"), "LoadCounter");
-
-    if (modus.equals("LoadGenerator")) {
-      LoadGenerator.main(args);
-    } else if (modus.equals("LoadGeneratorExtrem")) {
-      LoadGeneratorExtrem.main(args);
-    } else if (modus.equals("LoadCounter")) {
-      LoadCounter.main(args);
-    }
-
-  }
-}
diff --git a/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java b/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java
deleted file mode 100644
index 798f3014446605afab2cf20f3232896baab02802..0000000000000000000000000000000000000000
--- a/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadCounter.java
+++ /dev/null
@@ -1,84 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import com.google.common.math.StatsAccumulator;
-import java.time.Duration;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.serialization.Deserializer;
-import titan.ccp.common.kieker.kafka.IMonitoringRecordSerde;
-import titan.ccp.models.records.AggregatedActivePowerRecord;
-import titan.ccp.models.records.AggregatedActivePowerRecordFactory;
-
-public class LoadCounter {
-
-  public static void main(final String[] args) throws InterruptedException {
-
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaOutputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_OUTPUT_TOPIC"), "output");
-
-    final Properties props = new Properties();
-    props.setProperty("bootstrap.servers", kafkaBootstrapServers);
-    props.setProperty("group.id", "load-counter");
-    props.setProperty("enable.auto.commit", "true");
-    props.setProperty("auto.commit.interval.ms", "1000");
-    props.setProperty("max.poll.records", "1000000");
-    props.setProperty("max.partition.fetch.bytes", "134217728"); // 128 MB
-    props.setProperty("key.deserializer",
-        "org.apache.kafka.common.serialization.StringDeserializer");
-    props.setProperty("value.deserializer",
-        "org.apache.kafka.common.serialization.ByteArrayDeserializer");
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
-    final Deserializer<AggregatedActivePowerRecord> deserializer =
-        IMonitoringRecordSerde.deserializer(new AggregatedActivePowerRecordFactory());
-
-    final KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props);
-    consumer.subscribe(List.of(kafkaInputTopic, kafkaOutputTopic));
-
-    executor.scheduleAtFixedRate(
-        () -> {
-          final long time = System.currentTimeMillis();
-          final ConsumerRecords<String, byte[]> records = consumer.poll(Duration.ofMillis(500));
-
-          long inputCount = 0;
-          for (final ConsumerRecord<String, byte[]> inputRecord : records
-              .records(kafkaInputTopic)) {
-            inputCount++;
-          }
-
-          long outputCount = 0;
-          final StatsAccumulator statsAccumulator = new StatsAccumulator();
-          for (final ConsumerRecord<String, byte[]> outputRecord : records
-              .records(kafkaOutputTopic)) {
-            outputCount++;
-            final AggregatedActivePowerRecord record =
-                deserializer.deserialize(kafkaOutputTopic, outputRecord.value());
-            final long latency = time - record.getTimestamp();
-            statsAccumulator.add(latency);
-          }
-
-          final double latency = statsAccumulator.count() > 0 ? statsAccumulator.mean() : 0.0;
-
-          final long elapsedTime = System.currentTimeMillis() - time;
-          System.out
-              .println("input," + time + ',' + elapsedTime + ',' + 0 + ',' + inputCount);
-          System.out
-              .println("output," + time + ',' + elapsedTime + ',' + latency + ',' + outputCount);
-        },
-        0,
-        1,
-        TimeUnit.SECONDS);
-  }
-
-}
diff --git a/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java b/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java
deleted file mode 100644
index 97a7c84f872f3ab676128d903ae121c376bf7608..0000000000000000000000000000000000000000
--- a/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGenerator.java
+++ /dev/null
@@ -1,124 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.Random;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.kiekerbridge.KafkaRecordSender;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-
-public class LoadGenerator {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
-    final int numNestedGroups =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
-    final int numSensor =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
-    final int periodMs =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
-    final int value =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-    final boolean sendRegistry =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-    final int threads =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
-    final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
-    final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
-
-    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-    if (hierarchy.equals("deep")) {
-      MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-      for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-        lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-      }
-      for (int s = 0; s < numSensor; s++) {
-        lastSensor.addChildMachineSensor("sensor_" + s);
-      }
-    } else if (hierarchy.equals("full")) {
-      addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-    } else {
-      throw new IllegalStateException();
-    }
-
-    final List<String> sensors =
-        sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-            .collect(Collectors.toList());
-
-    if (sendRegistry) {
-      final ConfigPublisher configPublisher =
-          new ConfigPublisher(kafkaBootstrapServers, "configuration");
-      configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-      configPublisher.close();
-      System.out.println("Configuration sent.");
-
-      System.out.println("Now wait 30 seconds");
-      Thread.sleep(30_000);
-      System.out.println("And woke up again :)");
-    }
-
-
-    final Properties kafkaProperties = new Properties();
-    // kafkaProperties.put("acks", this.acknowledges);
-    kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
-    kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
-    kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(
-        kafkaBootstrapServers, kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(),
-        kafkaProperties);
-
-    final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
-    final Random random = new Random();
-
-    for (final String sensor : sensors) {
-      final int initialDelay = random.nextInt(periodMs);
-      executor.scheduleAtFixedRate(
-          () -> {
-            kafkaRecordSender.write(new ActivePowerRecord(
-                sensor,
-                System.currentTimeMillis(),
-                value));
-          },
-          initialDelay,
-          periodMs,
-          TimeUnit.MILLISECONDS);
-    }
-
-    System.out.println("Wait for termination...");
-    executor.awaitTermination(30, TimeUnit.DAYS);
-    System.out.println("Will terminate now");
-
-  }
-
-  private static int addChildren(final MutableAggregatedSensor parent, final int numChildren,
-      final int lvl, final int maxLvl, int nextId) {
-    for (int c = 0; c < numChildren; c++) {
-      if (lvl == maxLvl) {
-        parent.addChildMachineSensor("s_" + nextId);
-        nextId++;
-      } else {
-        final MutableAggregatedSensor newParent =
-            parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
-        nextId++;
-        nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
-      }
-    }
-    return nextId;
-  }
-
-}
diff --git a/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java b/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java
deleted file mode 100644
index 5bfb6ad488e90f39ded2b9e4cb57d10099f1c538..0000000000000000000000000000000000000000
--- a/uc4-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java
+++ /dev/null
@@ -1,165 +0,0 @@
-package titan.ccp.kiekerbridge.expbigdata19;
-
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.ThreadMXBean;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import org.apache.kafka.clients.producer.ProducerConfig;
-import titan.ccp.configuration.events.Event;
-import titan.ccp.kiekerbridge.KafkaRecordSender;
-import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
-import titan.ccp.model.sensorregistry.MutableSensorRegistry;
-import titan.ccp.model.sensorregistry.SensorRegistry;
-import titan.ccp.models.records.ActivePowerRecord;
-
-public class LoadGeneratorExtrem {
-
-  public static void main(final String[] args) throws InterruptedException, IOException {
-
-    final String hierarchy = Objects.requireNonNullElse(System.getenv("HIERARCHY"), "deep");
-    final int numNestedGroups =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1"));
-    final int numSensor =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1"));
-    final int value =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
-    final boolean sendRegistry =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
-    final boolean doNothing =
-        Boolean.parseBoolean(Objects.requireNonNullElse(System.getenv("DO_NOTHING"), "false"));
-    final int threads =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
-    final int producers =
-        Integer.parseInt(Objects.requireNonNullElse(System.getenv("PRODUCERS"), "1"));
-    final String kafkaBootstrapServers =
-        Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092");
-    final String kafkaInputTopic =
-        Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
-    final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
-    final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
-    final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
-
-    final SensorRegistry sensorRegistry =
-        buildSensorRegistry(hierarchy, numNestedGroups, numSensor);
-
-    if (sendRegistry) {
-      final ConfigPublisher configPublisher =
-          new ConfigPublisher(kafkaBootstrapServers, "configuration");
-      configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
-      configPublisher.close();
-      System.out.println("Configuration sent.");
-
-      System.out.println("Now wait 30 seconds");
-      Thread.sleep(30_000);
-      System.out.println("And woke up again :)");
-    }
-
-    final Properties kafkaProperties = new Properties();
-    // kafkaProperties.put("acks", this.acknowledges);
-    kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
-    kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
-    kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
-    final List<KafkaRecordSender<ActivePowerRecord>> kafkaRecordSenders = Stream
-        .<KafkaRecordSender<ActivePowerRecord>>generate(
-            () -> new KafkaRecordSender<>(
-                kafkaBootstrapServers,
-                kafkaInputTopic,
-                r -> r.getIdentifier(),
-                r -> r.getTimestamp(),
-                kafkaProperties))
-        .limit(producers)
-        .collect(Collectors.toList());
-
-    final List<String> sensors =
-        sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
-            .collect(Collectors.toList());
-
-    for (int i = 0; i < threads; i++) {
-      final int threadId = i;
-      new Thread(() -> {
-        while (true) {
-          for (final String sensor : sensors) {
-            if (!doNothing) {
-              kafkaRecordSenders.get(threadId % producers).write(new ActivePowerRecord(
-                  sensor,
-                  System.currentTimeMillis(),
-                  value));
-            }
-          }
-        }
-      }).start();
-    }
-
-    while (true) {
-      printCpuUsagePerThread();
-    }
-
-    // System.out.println("Wait for termination...");
-    // Thread.sleep(30 * 24 * 60 * 60 * 1000L);
-    // System.out.println("Will terminate now");
-  }
-
-  private static void printCpuUsagePerThread() throws InterruptedException {
-    final ThreadMXBean tmxb = ManagementFactory.getThreadMXBean();
-    final List<Thread> threads = new ArrayList<>(Thread.getAllStackTraces().keySet());
-
-    final long start = System.nanoTime();
-    final long[] startCpuTimes = new long[threads.size()];
-    for (int i = 0; i < threads.size(); i++) {
-      final Thread thread = threads.get(i);
-      startCpuTimes[i] = tmxb.getThreadCpuTime(thread.getId());
-    }
-
-    Thread.sleep(5000);
-
-    for (int i = 0; i < threads.size(); i++) {
-      final Thread thread = threads.get(i);
-      final long cpuTime = tmxb.getThreadCpuTime(thread.getId()) - startCpuTimes[i];
-      final long dur = System.nanoTime() - start;
-      final double util = (double) cpuTime / dur;
-      System.out.println(
-          "Thread " + thread.getName() + ": " + String.format(java.util.Locale.US, "%.4f", util));
-    }
-  }
-
-  private static SensorRegistry buildSensorRegistry(final String hierarchy,
-      final int numNestedGroups, final int numSensor) {
-    final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
-    if (hierarchy.equals("deep")) {
-      MutableAggregatedSensor lastSensor = sensorRegistry.getTopLevelSensor();
-      for (int lvl = 1; lvl < numNestedGroups; lvl++) {
-        lastSensor = lastSensor.addChildAggregatedSensor("group_lvl_" + lvl);
-      }
-      for (int s = 0; s < numSensor; s++) {
-        lastSensor.addChildMachineSensor("sensor_" + s);
-      }
-    } else if (hierarchy.equals("full")) {
-      addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 1, numNestedGroups, 0);
-    } else {
-      throw new IllegalStateException();
-    }
-    return sensorRegistry;
-  }
-
-  private static int addChildren(final MutableAggregatedSensor parent, final int numChildren,
-      final int lvl, final int maxLvl, int nextId) {
-    for (int c = 0; c < numChildren; c++) {
-      if (lvl == maxLvl) {
-        parent.addChildMachineSensor("s_" + nextId);
-        nextId++;
-      } else {
-        final MutableAggregatedSensor newParent =
-            parent.addChildAggregatedSensor("g_" + lvl + '_' + nextId);
-        nextId++;
-        nextId = addChildren(newParent, numChildren, lvl + 1, maxLvl, nextId);
-      }
-    }
-    return nextId;
-  }
-
-}
diff --git a/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java b/uc4-workload-generator/src/main/java/uc4/workloadGenerator/ConfigPublisher.java
similarity index 97%
rename from uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java
rename to uc4-workload-generator/src/main/java/uc4/workloadGenerator/ConfigPublisher.java
index d5f55a4ab7ca265b241e880363975070e9952c45..b126668818780caca1ea7c3c63b2203813130e9b 100644
--- a/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/ConfigPublisher.java
+++ b/uc4-workload-generator/src/main/java/uc4/workloadGenerator/ConfigPublisher.java
@@ -1,4 +1,4 @@
-package titan.ccp.kiekerbridge.expbigdata19;
+package uc4.workloadGenerator;
 
 import java.util.Properties;
 import java.util.concurrent.ExecutionException;
diff --git a/uc4-workload-generator/src/main/java/uc4/workloadGenerator/LoadGenerator.java b/uc4-workload-generator/src/main/java/uc4/workloadGenerator/LoadGenerator.java
new file mode 100644
index 0000000000000000000000000000000000000000..039687e0211375f206951c41a054c76e661407f8
--- /dev/null
+++ b/uc4-workload-generator/src/main/java/uc4/workloadGenerator/LoadGenerator.java
@@ -0,0 +1,87 @@
+package uc4.workloadGenerator;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.Random;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import kafkaSender.KafkaRecordSender;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import titan.ccp.configuration.events.Event;
+import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
+import titan.ccp.model.sensorregistry.MutableSensorRegistry;
+import titan.ccp.models.records.ActivePowerRecord;
+
+public class LoadGenerator {
+
+	public static void main(final String[] args) throws InterruptedException, IOException {
+		// Configuration (adapted from the uc1 generator), read from environment variables.
+
+		final int numSensor = Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10"));
+		final int periodMs = Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000"));
+		final int value = Integer.parseInt(Objects.requireNonNullElse(System.getenv("VALUE"), "10"));
+		final boolean sendRegistry = Boolean
+				.parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true"));
+		final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4"));
+		final String kafkaBootstrapServers = Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"),
+				"localhost:9092");
+		final String kafkaInputTopic = Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input");
+		final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE");
+		final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS");
+		final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY");
+
+		// Create a flat sensor registry with numSensor machine sensors.
+		final MutableSensorRegistry sensorRegistry = new MutableSensorRegistry("group_lvl_0");
+		addChildren(sensorRegistry.getTopLevelSensor(), numSensor, 0);
+
+		final List<String> sensors = sensorRegistry.getMachineSensors().stream().map(s -> s.getIdentifier())
+				.collect(Collectors.toList());
+
+		// TODO Do we need this?
+		if (sendRegistry) {
+			final ConfigPublisher configPublisher = new ConfigPublisher(kafkaBootstrapServers, "configuration");
+			configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson());
+			configPublisher.close();
+			System.out.println("Configuration sent.");
+
+			System.out.println("Now wait 30 seconds");
+			Thread.sleep(30_000);
+			System.out.println("And woke up again :)");
+		}
+
+		final Properties kafkaProperties = new Properties();
+		// kafkaProperties.put("acks", this.acknowledges);
+		kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize);
+		kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs);
+		kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory);
+		final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = new KafkaRecordSender<>(kafkaBootstrapServers,
+				kafkaInputTopic, r -> r.getIdentifier(), r -> r.getTimestamp(), kafkaProperties);
+
+		final ScheduledExecutorService executor = Executors.newScheduledThreadPool(threads);
+		final Random random = new Random();
+
+		for (final String sensor : sensors) {
+			final int initialDelay = random.nextInt(periodMs);
+			executor.scheduleAtFixedRate(() -> {
+				kafkaRecordSender.write(new ActivePowerRecord(sensor, System.currentTimeMillis(), value));
+			}, initialDelay, periodMs, TimeUnit.MILLISECONDS);
+		}
+
+		System.out.println("Wait for termination...");
+		executor.awaitTermination(30, TimeUnit.DAYS);
+		System.out.println("Will terminate now");
+
+	}
+
+	private static void addChildren(final MutableAggregatedSensor parent, final int numChildren, int nextId) {
+		for (int c = 0; c < numChildren; c++) {
+			parent.addChildMachineSensor("s_" + nextId);
+			nextId++;
+		}
+	}
+
+}
diff --git a/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java b/uc4-workload-generator/src/main/java/uc4/workloadGenerator/LoadGeneratorExtrem.java
similarity index 98%
rename from uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java
rename to uc4-workload-generator/src/main/java/uc4/workloadGenerator/LoadGeneratorExtrem.java
index 5bfb6ad488e90f39ded2b9e4cb57d10099f1c538..a864a0f333d9097eece8f4e93440e377500cef84 100644
--- a/uc3-workload-generator/src/main/java/titan/ccp/kiekerbridge/expbigdata19/LoadGeneratorExtrem.java
+++ b/uc4-workload-generator/src/main/java/uc4/workloadGenerator/LoadGeneratorExtrem.java
@@ -1,4 +1,4 @@
-package titan.ccp.kiekerbridge.expbigdata19;
+package uc4.workloadGenerator;
 
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
@@ -9,9 +9,9 @@ import java.util.Objects;
 import java.util.Properties;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
+import kafkaSender.KafkaRecordSender;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import titan.ccp.configuration.events.Event;
-import titan.ccp.kiekerbridge.KafkaRecordSender;
 import titan.ccp.model.sensorregistry.MutableAggregatedSensor;
 import titan.ccp.model.sensorregistry.MutableSensorRegistry;
 import titan.ccp.model.sensorregistry.SensorRegistry;