diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 433facfa99765228af9b753cf88378fc9d939a88..c4b8495224ad81ae2eebe81b0087319422da1969 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -319,7 +319,18 @@ test-slo-checker-lag-trend:
   tags:
     - exec-docker
   script:
-    - cd slope-evaluator
+    - cd slo-checker/record-lag
+    - pip install -r requirements.txt
+    - cd app
+    - python -m unittest
+
+test-slo-checker-dropped-records-kstreams:
+  stage: test
+  image: python:3.7-slim
+  tags:
+    - exec-docker
+  script:
+    - cd slo-checker/dropped-records
     - pip install -r requirements.txt
     - cd app
     - python -m unittest
@@ -332,7 +343,7 @@ deploy-slo-checker-lag-trend:
     - test-slo-checker-lag-trend
   script:
     - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
-    - docker build --pull -t theodolite-slo-checker-lag-trend slope-evaluator
+    - docker build --pull -t theodolite-slo-checker-lag-trend slo-checker/record-lag
     - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite-slo-checker-lag-trend $CR_HOST/$CR_ORG/theodolite-slo-checker-lag-trend:${DOCKER_TAG_NAME}latest"
     - "[ $CI_COMMIT_TAG ] && docker tag theodolite-slo-checker-lag-trend $CR_HOST/$CR_ORG/theodolite-slo-checker-lag-trend:$CI_COMMIT_TAG"
     - echo $CR_PW | docker login $CR_HOST -u $CR_USER --password-stdin
@@ -342,7 +353,32 @@ deploy-slo-checker-lag-trend:
     - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $CI_COMMIT_TAG"
       when: always
     - changes:
-      - slope-evaluator/**/*
+      - slo-checker/record-lag/**/*
+      if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
+      when: always
+    - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
+      when: manual
+      allow_failure: true
+
+deploy-slo-checker-dropped-records-kstreams:
+  stage: deploy
+  extends:
+    - .dind
+  needs:
+    - test-slo-checker-dropped-records-kstreams
+  script:
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    - docker build --pull -t theodolite-slo-checker-dropped-records-kstreams slo-checker/dropped-records
+    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite-slo-checker-dropped-records-kstreams $CR_HOST/$CR_ORG/theodolite-slo-checker-dropped-records-kstreams:${DOCKER_TAG_NAME}latest"
+    - "[ $CI_COMMIT_TAG ] && docker tag theodolite-slo-checker-dropped-records-kstreams $CR_HOST/$CR_ORG/theodolite-slo-checker-dropped-records-kstreams:$CI_COMMIT_TAG"
+    - echo $CR_PW | docker login $CR_HOST -u $CR_USER --password-stdin
+    - docker push $CR_HOST/$CR_ORG/theodolite-slo-checker-dropped-records-kstreams
+    - docker logout
+  rules:
+    - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - slo-checker/dropped-records/**/*
       if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
       when: always
     - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
diff --git a/CITATION.cff b/CITATION.cff
index 52e6e13286c0ba0aca34005a4d245d73b9869874..ca94e1c5039d3aeac3a4535767d5217de4960a6f 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -8,7 +8,7 @@ authors:
     given-names: Wilhelm
     orcid: "https://orcid.org/0000-0001-6625-4335"
 title: Theodolite
-version: "0.4.0"
+version: "0.5.1"
 repository-code: "https://github.com/cau-se/theodolite"
 license: "Apache-2.0"
 doi: "10.1016/j.bdr.2021.100209"
diff --git a/README.md b/README.md
index f2673f4b9ed0c46987963f8b455e19def802db79..804a193df21f3883ecf9a727af5a743b77a9cceb 100644
--- a/README.md
+++ b/README.md
@@ -4,20 +4,17 @@
 
 Theodolite is a framework for benchmarking the horizontal and vertical scalability of stream processing engines. It consists of three modules:
 
-## Theodolite Benchmarks
-
-Theodolite contains 4 application benchmarks, which are based on typical use cases for stream processing within microservices. For each benchmark, a corresponding workload generator is provided. Currently, this repository provides benchmark implementations for Apache Kafka Streams and Apache Flink. The benchmark sources can be found in [Thedolite benchmarks](benchmarks).
-
-
-## Theodolite Execution Framework
-
-Theodolite aims to benchmark scalability of stream processing engines for real use cases. Microservices that apply stream processing techniques are usually deployed in elastic cloud environments. Hence, Theodolite's cloud-native benchmarking framework deploys its components in a cloud environment, orchestrated by Kubernetes. More information on how to execute scalability benchmarks can be found in [Thedolite execution framework](execution).
+## Theodolite Benchmarking Tool
 
+Theodolite aims to benchmark the scalability of stream processing engines for real use cases. Microservices that apply stream processing techniques are usually deployed in elastic cloud environments. Hence, Theodolite's cloud-native benchmarking framework deploys its components in a cloud environment, orchestrated by Kubernetes. It is recommended to install Theodolite with the package manager Helm. The Theodolite Helm chart, along with instructions on how to install it, can be found in the [`helm`](helm) directory.
 
 ## Theodolite Analysis Tools
 
-Theodolite's benchmarking method creates a *scalability graph* allowing to draw conclusions about the scalability of a stream processing engine or its deployment. A scalability graph shows how resource demand evolves with an increasing workload. Theodolite provides Jupyter notebooks for creating such scalability graphs based on benchmarking results from the execution framework. More information can be found in [Theodolite analysis tool](analysis).
+Theodolite's benchmarking method maps load intensities to the resource amounts that are required for processing them. A plot showing how resource demand evolves with increasing load allows drawing conclusions about the scalability of a stream processing engine or its deployment. Theodolite provides Jupyter notebooks for creating such plots based on benchmarking results from the execution framework. More information can be found in [Theodolite analysis tool](analysis).
+
+## Theodolite Benchmarks
 
+Theodolite comes with 4 application benchmarks, which are based on typical use cases for stream processing within microservices. For each benchmark, a corresponding load generator is provided. Currently, this repository provides benchmark implementations for Apache Kafka Streams and Apache Flink. The benchmark sources can be found in [Theodolite benchmarks](theodolite-benchmarks).
 
 ## How to Cite
 
diff --git a/analysis/demand-metric-plot.ipynb b/analysis/demand-metric-plot.ipynb
index 90ef227dbf6a4566760329b615d5f59b4cc2bc25..71e08f0590f819a63b1bdd6bf13b57ac665f65bc 100644
--- a/analysis/demand-metric-plot.ipynb
+++ b/analysis/demand-metric-plot.ipynb
@@ -1,22 +1,22 @@
 {
  "cells": [
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "# Theodolite Analysis - Plotting the Demand Metric\n",
     "\n",
     "This notebook creates a plot, showing scalability as a function that maps load intensities to the resources required for processing them. It is able to combine multiple such plots in one figure, for example, to compare multiple systems or configurations.\n",
     "\n",
     "The notebook takes a CSV file for each plot mapping load intensities to minimum required resources, computed by the `demand-metric-plot.ipynb` notebook."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "First, we need to import some libraries, which are required for creating the plots."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -33,11 +33,11 @@
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "We need to specify the directory, where the demand CSV files can be found, and a dictionary that maps a system description (e.g. its name) to the corresponding CSV file (prefix). To use Unicode narrow non-breaking spaces in the description format it as `u\"1000\\u202FmCPU\"`."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -53,11 +53,11 @@
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "Now, we combie all systems described in `experiments`."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -71,11 +71,11 @@
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "We might want to display the mappings before we plot it."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -87,11 +87,11 @@
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "The following code creates a MatPlotLib figure showing the scalability plots for all specified systems. You might want to adjust its styling etc. according to your preferences. Make sure to also set a filename."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -149,27 +149,33 @@
   }
  ],
  "metadata": {
+  "file_extension": ".py",
+  "interpreter": {
+   "hash": "e9e076445e1891a25f59b525adcc71b09846b3f9cf034ce4147fc161b19af121"
+  },
+  "kernelspec": {
+   "display_name": "Python 3.8.10 64-bit ('.venv': venv)",
+   "name": "python3"
+  },
   "language_info": {
-   "name": "python",
    "codemirror_mode": {
     "name": "ipython",
     "version": 3
    },
-   "version": "3.8.5-final"
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
   },
-  "orig_nbformat": 2,
-  "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "npconvert_exporter": "python",
+  "orig_nbformat": 2,
   "pygments_lexer": "ipython3",
-  "version": 3,
-  "kernelspec": {
-   "name": "python37064bitvenvvenv6c432ee1239d4f3cb23f871068b0267d",
-   "display_name": "Python 3.7.0 64-bit ('.venv': venv)",
-   "language": "python"
-  }
+  "version": 3
  },
  "nbformat": 4,
  "nbformat_minor": 2
-}
\ No newline at end of file
+}
diff --git a/analysis/demand-metric.ipynb b/analysis/demand-metric.ipynb
index bcea129b7cb07465fa99f32b6f8b2b6115e8a0aa..fbf3ee02960a1e06457eef5dda96cb6d0a1a75ac 100644
--- a/analysis/demand-metric.ipynb
+++ b/analysis/demand-metric.ipynb
@@ -1,6 +1,8 @@
 {
  "cells": [
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "# Theodolite Analysis - Demand Metric\n",
     "\n",
@@ -9,11 +11,11 @@
     "Theodolite's *demand* metric is a function, mapping load intensities to the minimum required resources (e.g., instances) that are required to process this load. With this notebook, the *demand* metric function is approximated by a map of tested load intensities to their minimum required resources.\n",
     "\n",
     "The final output when running this notebook will be a CSV file, providig this mapping. It can be used to create nice plots of a system's scalability using the `demand-metric-plot.ipynb` notebook."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "In the following cell, we need to specifiy:\n",
     "\n",
@@ -22,9 +24,7 @@
     "* `max_lag_trend_slope`: The maximum tolerable increase in queued messages per second.\n",
     "* `measurement_dir`: The directory where the measurement data files are to be found.\n",
     "* `results_dir`: The directory where the computed demand CSV files are to be stored."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -40,11 +40,11 @@
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "With the following call, we compute our demand mapping."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -58,11 +58,11 @@
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "We might already want to plot a simple visualization here:"
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -74,11 +74,11 @@
    ]
   },
   {
+   "cell_type": "markdown",
+   "metadata": {},
    "source": [
     "Finally we store the results in a CSV file."
-   ],
-   "cell_type": "markdown",
-   "metadata": {}
+   ]
   },
   {
    "cell_type": "code",
@@ -93,27 +93,33 @@
   }
  ],
  "metadata": {
+  "file_extension": ".py",
+  "interpreter": {
+   "hash": "e9e076445e1891a25f59b525adcc71b09846b3f9cf034ce4147fc161b19af121"
+  },
+  "kernelspec": {
+   "display_name": "Python 3.8.10 64-bit ('.venv': venv)",
+   "name": "python3"
+  },
   "language_info": {
-   "name": "python",
    "codemirror_mode": {
     "name": "ipython",
     "version": 3
    },
-   "version": "3.8.5-final"
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
   },
-  "orig_nbformat": 2,
-  "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "npconvert_exporter": "python",
+  "orig_nbformat": 2,
   "pygments_lexer": "ipython3",
-  "version": 3,
-  "kernelspec": {
-   "name": "python37064bitvenvvenv6c432ee1239d4f3cb23f871068b0267d",
-   "display_name": "Python 3.7.0 64-bit ('.venv': venv)",
-   "language": "python"
-  }
+  "version": 3
  },
  "nbformat": 4,
  "nbformat_minor": 2
-}
\ No newline at end of file
+}
diff --git a/analysis/src/demand.py b/analysis/src/demand.py
index dfb20c05af8e9a134eedd2cdb584c961a82369f5..2178ab7c5dc5f7e4c04ebb58d4c14c9bf8b1aeff 100644
--- a/analysis/src/demand.py
+++ b/analysis/src/demand.py
@@ -1,59 +1,51 @@
 import os
 from datetime import datetime, timedelta, timezone
 import pandas as pd
+from pandas.core.frame import DataFrame
 from sklearn.linear_model import LinearRegression
 
 def demand(exp_id, directory, threshold, warmup_sec):
     raw_runs = []
 
-    # Compute SL, i.e., lag trend, for each tested configuration
-    filenames = [filename for filename in os.listdir(directory) if filename.startswith(f"exp{exp_id}") and filename.endswith("totallag.csv")]
+    # Compute SLI, i.e., lag trend, for each tested configuration
+    filenames = [filename for filename in os.listdir(directory) if filename.startswith(f"exp{exp_id}") and "lag-trend" in filename and filename.endswith(".csv")]
     for filename in filenames:
-        #print(filename)
         run_params = filename[:-4].split("_")
-        dim_value = run_params[2]
-        instances = run_params[3]
+        dim_value = run_params[1]
+        instances = run_params[2]
 
         df = pd.read_csv(os.path.join(directory, filename))
-        #input = df.loc[df['topic'] == "input"]
         input = df
-        #print(input)
+
         input['sec_start'] = input.loc[0:, 'timestamp'] - input.iloc[0]['timestamp']
-        #print(input)
-        #print(input.iloc[0, 'timestamp'])
+
         regress = input.loc[input['sec_start'] >= warmup_sec] # Warm-Up
-        #regress = input
 
-        #input.plot(kind='line',x='timestamp',y='value',color='red')
-        #plt.show()
+        X = regress.iloc[:, 1].values.reshape(-1, 1)  # .values converts the column into a NumPy array
+        Y = regress.iloc[:, 2].values.reshape(-1, 1)  # -1 infers the number of rows; the result has 1 column
 
-        X = regress.iloc[:, 2].values.reshape(-1, 1)  # values converts it into a numpy array
-        Y = regress.iloc[:, 3].values.reshape(-1, 1)  # -1 means that calculate the dimension of rows, but have 1 column
         linear_regressor = LinearRegression()  # create object for the class
         linear_regressor.fit(X, Y)  # perform linear regression
         Y_pred = linear_regressor.predict(X)  # make predictions
 
         trend_slope = linear_regressor.coef_[0][0]
-        #print(linear_regressor.coef_)
 
         row = {'load': int(dim_value), 'resources': int(instances), 'trend_slope': trend_slope}
-        #print(row)
         raw_runs.append(row)
 
     runs = pd.DataFrame(raw_runs)
 
-    # Set suitable = True if SLOs are met, i.e., lag trend is below threshold
-    runs["suitable"] =  runs.apply(lambda row: row['trend_slope'] < threshold, axis=1)
-
-    # Sort results table (unsure if required)
-    runs.columns = runs.columns.str.strip()
-    runs.sort_values(by=["load", "resources"])
+    # Group by load and resources to handle repetitions, and take the median over the repetitions
+    # (for an even number of repetitions, the mean of the two middle values is used)
+    medians = runs.groupby(by=['load', 'resources'], as_index=False).median()
 
-    # Filter only suitable configurations
-    filtered = runs[runs.apply(lambda x: x['suitable'], axis=1)]
-
-    # Compute demand per load intensity
-    grouped = filtered.groupby(['load'])['resources'].min()
-    demand_per_load = grouped.to_frame().reset_index()
+    # Set suitable = True if SLOs are met, i.e., lag trend slope is below threshold
+    medians["suitable"] =  medians.apply(lambda row: row['trend_slope'] < threshold, axis=1)
 
+    suitable = medians[medians.apply(lambda x: x['suitable'], axis=1)]
+
+    # Compute minimal demand per load intensity
+    demand_per_load = suitable.groupby(by=['load'], as_index=False)['resources'].min()
+
     return demand_per_load
+
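+# Example usage (a sketch; the parameter values below are hypothetical):
+#
+#   demand_per_load = demand(exp_id=200, directory='./results', threshold=2000, warmup_sec=60)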
diff --git a/codemeta.json b/codemeta.json
index 5696996592f63bf8ece23239d8204e0f25b9cce1..a158e30eb7f1ab433779678aba3a1cc3b7e33c80 100644
--- a/codemeta.json
+++ b/codemeta.json
@@ -5,10 +5,10 @@
     "codeRepository": "https://github.com/cau-se/theodolite",
     "dateCreated": "2020-03-13",
     "datePublished": "2020-07-27",
-    "dateModified": "2021-03-18",
+    "dateModified": "2021-11-12",
     "downloadUrl": "https://github.com/cau-se/theodolite/releases",
     "name": "Theodolite",
-    "version": "0.4.0",
+    "version": "0.5.1",
     "description": "Theodolite is a framework for benchmarking the horizontal and vertical scalability of stream processing engines.",
     "developmentStatus": "active",
     "referencePublication": "https://doi.org/10.1016/j.bdr.2021.100209",
diff --git a/docs/README.md b/docs/README.md
index 4fd13bdfc157efe8b3491695bb83972f96a82c5d..eb0848d52ec4235c6325ba0a373ea2628e52a102 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -10,16 +10,20 @@ permalink: /
 
 Theodolite is a framework for benchmarking the horizontal and vertical scalability of stream processing engines. It consists of three modules:
 
-## Theodolite Benchmarks
+## Theodolite Benchmarking Tool
 
-Theodolite contains 4 application benchmarks, which are based on typical use cases for stream processing within microservices. For each benchmark, a corresponding workload generator is provided. Currently, this repository provides benchmark implementations for Kafka Streams.
+Theodolite aims to benchmark the scalability of stream processing engines for real use cases. Microservices that apply stream processing techniques are usually deployed in elastic cloud environments. Hence, Theodolite's cloud-native benchmarking framework deploys its components in a cloud environment, orchestrated by Kubernetes. It is recommended to install Theodolite with the package manager Helm. The Theodolite Helm chart, along with instructions on how to install it, can be found in the [`helm`](helm) directory.
 
+## Theodolite Analysis Tools
 
-## Theodolite Execution Framework
+Theodolite's benchmarking method maps load intensities to the resource amounts that are required for processing them. A plot showing how resource demand evolves with increasing load allows drawing conclusions about the scalability of a stream processing engine or its deployment. Theodolite provides Jupyter notebooks for creating such plots based on benchmarking results from the execution framework. More information can be found in [Theodolite analysis tool](analysis).
 
-Theodolite aims to benchmark scalability of stream processing engines for real use cases. Microservices that apply stream processing techniques are usually deployed in elastic cloud environments. Hence, Theodolite's cloud-native benchmarking framework deploys as components in a cloud environment, orchestrated by Kubernetes. More information on how to execute scalability benchmarks can be found in [Thedolite execution framework](execution).
+## Theodolite Benchmarks
 
+Theodolite comes with 4 application benchmarks, which are based on typical use cases for stream processing within microservices. For each benchmark, a corresponding load generator is provided. Currently, this repository provides benchmark implementations for Apache Kafka Streams and Apache Flink. The benchmark sources can be found in [Theodolite benchmarks](theodolite-benchmarks).
 
-## Theodolite Analysis Tools
+## How to Cite
+
+If you use Theodolite, please cite
 
-Theodolite's benchmarking method create a *scalability graph* allowing to draw conclusions about the scalability of a stream processing engine or its deployment. A scalability graph shows how resource demand evolves with an increasing workload. Theodolite provides Jupyter notebooks for creating such scalability graphs based on benchmarking results from the execution framework. More information can be found in [Theodolite analysis tool](analysis).
+> Sören Henning and Wilhelm Hasselbring. (2021). Theodolite: Scalability Benchmarking of Distributed Stream Processing Engines in Microservice Architectures. Big Data Research, Volume 25. DOI: [10.1016/j.bdr.2021.100209](https://doi.org/10.1016/j.bdr.2021.100209). arXiv:[2009.00304](https://arxiv.org/abs/2009.00304).
diff --git a/docs/index.yaml b/docs/index.yaml
index 087124d158794e1b48dfc880e26da2c91d78808f..54580ea45f1c678443dae96c7139f53fdac37f60 100644
--- a/docs/index.yaml
+++ b/docs/index.yaml
@@ -1,6 +1,76 @@
 apiVersion: v1
 entries:
   theodolite:
+  - apiVersion: v2
+    appVersion: 0.5.1
+    created: "2021-11-12T16:15:01.629937292+01:00"
+    dependencies:
+    - condition: grafana.enabled
+      name: grafana
+      repository: https://grafana.github.io/helm-charts
+      version: 6.17.5
+    - condition: kube-prometheus-stack.enabled
+      name: kube-prometheus-stack
+      repository: https://prometheus-community.github.io/helm-charts
+      version: 12.0.0
+    - condition: cp-helm-charts.enabled
+      name: cp-helm-charts
+      repository: https://soerenhenning.github.io/cp-helm-charts
+      version: 0.6.0
+    - condition: kafka-lag-exporter.enabled
+      name: kafka-lag-exporter
+      repository: https://lightbend.github.io/kafka-lag-exporter/repo/
+      version: 0.6.6
+    description: Theodolite is a framework for benchmarking the scalability of stream
+      processing engines.
+    digest: a67374c4cb2b0e8b2d711468364c6b4a486a910bd1c667dbf3c5614e36e0680c
+    home: https://cau-se.github.io/theodolite
+    maintainers:
+    - email: soeren.henning@email.uni-kiel.de
+      name: Sören Henning
+      url: https://www.se.informatik.uni-kiel.de/en/team/soeren-henning-m-sc
+    name: theodolite
+    sources:
+    - https://github.com/cau-se/theodolite
+    type: application
+    urls:
+    - https://github.com/cau-se/theodolite/releases/download/v0.5.1/theodolite-0.5.1.tgz
+    version: 0.5.1
+  - apiVersion: v2
+    appVersion: 0.5.0
+    created: "2021-11-04T17:45:14.153231798+01:00"
+    dependencies:
+    - condition: grafana.enabled
+      name: grafana
+      repository: https://grafana.github.io/helm-charts
+      version: 6.0.0
+    - condition: kube-prometheus-stack.enabled
+      name: kube-prometheus-stack
+      repository: https://prometheus-community.github.io/helm-charts
+      version: 12.0.0
+    - condition: cp-helm-charts.enabled
+      name: cp-helm-charts
+      repository: https://soerenhenning.github.io/cp-helm-charts
+      version: 0.6.0
+    - condition: kafka-lag-exporter.enabled
+      name: kafka-lag-exporter
+      repository: https://lightbend.github.io/kafka-lag-exporter/repo/
+      version: 0.6.6
+    description: Theodolite is a framework for benchmarking the scalability of stream
+      processing engines.
+    digest: 8a4f218e44341eb8fb09ddc58c6aaa0a14aded685f3423088c21fe0ffc112281
+    home: https://cau-se.github.io/theodolite
+    maintainers:
+    - email: soeren.henning@email.uni-kiel.de
+      name: Sören Henning
+      url: https://www.se.informatik.uni-kiel.de/en/team/soeren-henning-m-sc
+    name: theodolite
+    sources:
+    - https://github.com/cau-se/theodolite
+    type: application
+    urls:
+    - https://github.com/cau-se/theodolite/releases/download/v0.5.0/theodolite-0.5.0.tgz
+    version: 0.5.0
   - apiVersion: v2
     appVersion: 0.4.0
     created: "2021-03-18T15:50:50.930902088+01:00"
@@ -36,4 +106,4 @@ entries:
     urls:
     - https://github.com/cau-se/theodolite/releases/download/v0.4.0/theodolite-0.4.0.tgz
     version: 0.4.0
-generated: "2021-03-18T15:50:50.897801281+01:00"
+generated: "2021-11-12T16:15:01.591258889+01:00"
diff --git a/docs/release-process.md b/docs/release-process.md
index 981306b0762e43eacb29a434cc1e505593548fce..103d8d1ac65472459bcaad648f921240eaf508c8 100644
--- a/docs/release-process.md
+++ b/docs/release-process.md
@@ -18,8 +18,11 @@ again be merged into master.
 
 3. Update all references to artifacts which are versioned. This includes:
 
-    1. Update all references to Theodolite Docker images to tag `v0.3.1`. These are the Kubernetes resource definitions in
-`execution`, the references to *latest* in `run_uc.py`, the Docker Compose files in `theodolite-benchmarks/docker-test` and the example `theodolite.yaml` job.
+    1. Update all references to Theodolite Docker images to tag `v0.3.1`. These are:
+        1. the default `helm/values.yaml` file,
+        2. the example `execution/theodolite.yaml` job,
+        3. the Kubernetes benchmark resources in `theodolite-benchmarks/definitions/**/resources` and
+        4. the Docker Compose files in `theodolite-benchmarks/docker-test`.
 
     2. Update both, the `version` and the `appVersion` fields, in the Helm `Charts.yaml` file to `0.3.1`.
 
diff --git a/execution/.gitignore b/execution/.gitignore
deleted file mode 100644
index bac9a5d1eeb12d9e40d38376904e8fb69c0e5231..0000000000000000000000000000000000000000
--- a/execution/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-exp_counter.txt
-results
diff --git a/helm/Chart.yaml b/helm/Chart.yaml
index b09b4022d6727029311815b3b2b1bfcf4b4d2bd1..0e56a156832ed6f9159f436ec63f825d132e8dd3 100644
--- a/helm/Chart.yaml
+++ b/helm/Chart.yaml
@@ -13,11 +13,11 @@ type: application
 
 dependencies:
   - name: grafana
-    version: 6.0.0
+    version: 6.17.5
     repository: https://grafana.github.io/helm-charts
     condition: grafana.enabled
   - name: kube-prometheus-stack
-    version:  12.0.0
+    version:  20.0.1
     repository: https://prometheus-community.github.io/helm-charts
     condition: kube-prometheus-stack.enabled
   - name: cp-helm-charts
@@ -25,10 +25,10 @@ dependencies:
     repository: https://soerenhenning.github.io/cp-helm-charts
     condition: cp-helm-charts.enabled
   - name: kafka-lag-exporter
-    version: 0.6.6
+    version: 0.6.7
     repository: https://lightbend.github.io/kafka-lag-exporter/repo/
     condition: kafka-lag-exporter.enabled
 
-version: 0.5.0-SNAPSHOT
+version: 0.6.0-SNAPSHOT
 
-appVersion: 0.5.0-SNAPSHOT
+appVersion: 0.6.0-SNAPSHOT
diff --git a/helm/README.md b/helm/README.md
index 078c9c9a2b3f896d5cf5a30e7c2540a36f8057e4..1a3428b5e601de0c6c33f9dab236321e95592c6c 100644
--- a/helm/README.md
+++ b/helm/README.md
@@ -2,55 +2,47 @@
 
 ## Installation
 
-Install the chart via:
+The Theodolite Helm chart with all its dependencies can be installed via:
 
 ```sh
 helm dependencies update .
 helm install theodolite .
 ```
 
-This chart installs requirements to execute benchmarks with Theodolite.
+## Customize Installation
 
-Dependencies and subcharts:
+As usual, the installation with Helm can be configured by passing a values YAML file:
 
-- Prometheus Operator
-- Prometheus
-- Grafana (incl. dashboard and data source configuration)
-- Kafka
-- Zookeeper
-- A Kafka client pod
-
-## Test
-
-Test the installation:
-
-```sh
-helm test theodolite
+```sh
+helm install theodolite . -f <your-config.yaml>
 ```
 
-Our test files are located [here](templates/../../theodolite-chart/templates/tests). Many subcharts have their own tests, these are also executed and are placed in the respective /templates folders. 
-
-Please note: If a test fails, Helm will stop testing.
+We provide a minimal configuration, especially suited for development environments, with the `preconfigs/minimal.yaml`
+file.
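+
+For example, Theodolite can be installed with this configuration via:
+
+```sh
+helm install theodolite . -f preconfigs/minimal.yaml
+```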
 
-It is possible that the tests are not running successfully at the moment. This is because the Helm tests of the subchart cp-confluent receive a timeout exception. There is an [issue](https://github.com/confluentinc/cp-helm-charts/issues/318) for this problem on GitHub.
+By default, Helm installs the Theodolite CRDs used for the operator. If Theodolite will not be used as an operator or if
+the CRDs are already installed, you can skip their installation by adding the flag `--skip-crds`.
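+
+For example, to skip the CRD installation:
+
+```sh
+helm install theodolite . --skip-crds
+```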
 
-## Configuration
+## Test Installation
 
-In development environments Kubernetes resources are often low. To reduce resource consumption, we provide an `one-broker-value.yaml` file. This file can be used with:
+Test the installation with:
 
 ```sh
-helm install theodolite . -f preconfigs/one-broker-values.yaml
+helm test theodolite
 ```
 
+Our test files are located [here](templates/tests). Many subcharts have their own tests, which are also executed.
+Please note: If a test fails, Helm will stop testing.
+
 ## Uninstall this Chart
 
-To uninstall/delete the `theodolite` deployment (by default Helm will be install all CRDs (`execution` and `benchmark`) automatically. If Helm should not install these CRDs, use the flag `--skip-crds`)
+The Theodolite Helm chart can easily be removed with:
 
 ```sh
 helm uninstall theodolite
 ```
 
-This command does not remove the CRDs which are created by this chart. Remove them manually with:
+Helm does not remove any CRDs created by this chart. You can remove them manually with:
 
 ```sh
 # CRDs from Theodolite
@@ -69,9 +61,20 @@ kubectl delete crd thanosrulers.monitoring.coreos.com
 
 ## Development
 
-**Hints**:
+### Dependencies
+
+The following 3rd party charts are used by Theodolite:
+
+- Kube Prometheus Stack (to install the Prometheus Operator, which is used to create Prometheus instances)
+- Grafana (including a dashboard and a data source configuration)
+- Confluent Platform (for Kafka and Zookeeper)
+- Kafka Lag Exporter (used to collect monitoring data of the Kafka lag)
+
+### Hints
+
+#### Grafana
 
-- Grafana configuration: Grafana ConfigMaps contains expressions like {{ topic }}. Helm uses the same syntax for template function. More information [here](https://github.com/helm/helm/issues/2798)
+Grafana ConfigMaps contain expressions like `{{ topic }}`. Helm uses the same syntax for template functions. More information can be found [here](https://github.com/helm/helm/issues/2798).
   - Escape braces: {{ "{{" topic }}
   - Let Helm render the template as raw string: {{ `{{ <config>}}` }}
   
\ No newline at end of file
diff --git a/helm/preconfigs/minimal.yaml b/helm/preconfigs/minimal.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b0828c2f424e8456933dc626a66a199cd60aa5da
--- /dev/null
+++ b/helm/preconfigs/minimal.yaml
@@ -0,0 +1,12 @@
+cp-helm-charts:
+  cp-zookeeper:
+    servers: 1
+
+  cp-kafka:
+    brokers: 1
+    configurationOverrides:
+      offsets.topic.replication.factor: "1"
+
+operator:
+  resultsVolume:
+    enabled: false
diff --git a/helm/preconfigs/oci.yaml b/helm/preconfigs/oci.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dd070a1d7983404add85cf75ded0a057a76e854f
--- /dev/null
+++ b/helm/preconfigs/oci.yaml
@@ -0,0 +1,4 @@
+operator:
+  resultsVolume:
+    storageClassName: "oci-bv"
+    size: 50Gi # minimal size in OCI
\ No newline at end of file
diff --git a/helm/preconfigs/one-broker-values.yaml b/helm/preconfigs/one-broker-values.yaml
deleted file mode 100644
index c53c1f1eb8bc7a17f192d70a6f10f8cacc09c98f..0000000000000000000000000000000000000000
--- a/helm/preconfigs/one-broker-values.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-cp-helm-charts:
-    ## ------------------------------------------------------
-    ## Zookeeper
-    ## ------------------------------------------------------
-    cp-zookeeper:
-      servers: 1 # default: 3 
-
-  ## ------------------------------------------------------
-  ## Kafka
-  ## ------------------------------------------------------
-    cp-kafka:
-        brokers: 1 # default: 10
-
-        configurationOverrides:
-          offsets.topic.replication.factor: "1"
\ No newline at end of file
diff --git a/helm/templates/_helpers.tpl b/helm/templates/_helpers.tpl
index f59f74d369b64ec89a44cbf2048fda9e844df92b..b530b553ea90671aba8154cc250a20924bae0183 100644
--- a/helm/templates/_helpers.tpl
+++ b/helm/templates/_helpers.tpl
@@ -60,3 +60,10 @@ Create the name of the service account to use
 {{- default "default" .Values.serviceAccount.name }}
 {{- end }}
 {{- end }}
+
+{{/*
+Create the name of the results volume to use
+*/}}
+{{- define "theodolite.resultsClaimName" -}}
+{{- default (printf "%s-results" (include "theodolite.fullname" .)) .Values.operator.resultsVolume.existingClaim }}
+{{- end }}
diff --git a/helm/templates/theodolite/random-scheduler/deployment.yaml b/helm/templates/theodolite/random-scheduler/deployment.yaml
index a1ea535d52d3dce971806dd638a90e9acb81c5d0..55b6e4ad5f8fafccc9623e69ef1df1fccf81ed39 100644
--- a/helm/templates/theodolite/random-scheduler/deployment.yaml
+++ b/helm/templates/theodolite/random-scheduler/deployment.yaml
@@ -22,8 +22,8 @@ spec:
       serviceAccount: {{ include "theodolite.fullname" . }}-random-scheduler
       containers:
         - name: random-scheduler
-          image: ghcr.io/cau-se/theodolite-random-scheduler:latest
-          #imagePullPolicy: Always
+          image: "{{ .Values.randomScheduler.image }}:{{ .Values.randomScheduler.imageTag }}"
+          imagePullPolicy: "{{ .Values.randomScheduler.imagePullPolicy }}"
           env:
             - name: TARGET_NAMESPACE
               value: {{ .Release.Namespace }}
diff --git a/helm/templates/theodolite/results-volume/pvc.yaml b/helm/templates/theodolite/results-volume/pvc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6dda16bc85d308c8bf0c9c41dd55cca3582f0793
--- /dev/null
+++ b/helm/templates/theodolite/results-volume/pvc.yaml
@@ -0,0 +1,18 @@
+{{- if and .Values.operator.resultsVolume.enabled (not .Values.operator.resultsVolume.existingClaim) -}}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: {{ include "theodolite.resultsClaimName" . }}
+spec:
+  {{- if .Values.operator.resultsVolume.storageClassName }}
+  storageClassName: {{ .Values.operator.resultsVolume.storageClassName }}
+  {{- end }}
+  accessModes:
+    - ReadWriteOnce
+    {{- range .Values.operator.resultsVolume.accessModes }}
+    - {{ . | quote }}
+    {{- end }}
+  resources:
+    requests:
+      storage: {{ .Values.operator.resultsVolume.size | quote }}
+{{- end }}
\ No newline at end of file
diff --git a/helm/templates/theodolite/role-binding.yaml b/helm/templates/theodolite/role-binding.yaml
index 93d8c34e7bc544c3b0c231e986bc58c792cce38e..3b327bb246f9716be0939416db55fc1b2cc5dd70 100644
--- a/helm/templates/theodolite/role-binding.yaml
+++ b/helm/templates/theodolite/role-binding.yaml
@@ -1,5 +1,5 @@
 {{- if .Values.rbac.create -}}
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
   name:  {{ include "theodolite.fullname" . }}
diff --git a/helm/templates/theodolite/theodolite-operator.yaml b/helm/templates/theodolite/theodolite-operator.yaml
index 3487b9a4fabb16897b302d8be65f065a647ffb16..52ddcd500ab8d050187028026def84e9d057c252 100644
--- a/helm/templates/theodolite/theodolite-operator.yaml
+++ b/helm/templates/theodolite/theodolite-operator.yaml
@@ -64,6 +64,19 @@ spec:
           - name: LOG_LEVEL
             value: INFO
         {{- end }}
+        {{- if .Values.operator.sloChecker.droppedRecordsKStreams.enabled }}
+        - name: slo-checker-dropped-records-kstreams
+          image: "{{ .Values.operator.sloChecker.droppedRecordsKStreams.image }}:{{ .Values.operator.sloChecker.droppedRecordsKStreams.imageTag }}"
+          imagePullPolicy: "{{ .Values.operator.sloChecker.droppedRecordsKStreams.imagePullPolicy }}"
+          ports:
+          - containerPort: 8081
+            name: analysis
+          env:
+          - name: PORT
+            value: "8081"
+          - name: LOG_LEVEL
+            value: INFO
+        {{- end }}
         {{- if and .Values.operator.resultsVolume.enabled .Values.operator.resultsVolume.accessSidecar.enabled }}
         - name: results-access
           image: busybox:stable
@@ -81,7 +94,7 @@ spec:
       {{- if .Values.operator.resultsVolume.enabled }}
       - name: theodolite-pv-storage
         persistentVolumeClaim:
-          claimName: {{ .Values.operator.resultsVolume.persistentVolumeClaim.name | quote }}
+          claimName: {{ include "theodolite.resultsClaimName" . | quote }}
       {{- end }}
       - name: benchmark-resources-uc1-kstreams
         configMap:
diff --git a/helm/update-index.sh b/helm/update-index.sh
index 286724dd87718387df58ed993af417bf0fd4d8ec..66c55bb8b79e18e3d06d156cb1859f2a53078999 100755
--- a/helm/update-index.sh
+++ b/helm/update-index.sh
@@ -3,7 +3,7 @@
 RELEASE_NAME=$1 # Supposed to be equal to tag, e.g., v0.3.0
 
 RELEASE_PATH="https://github.com/cau-se/theodolite/releases/download"
-REPO_INDEX="../../docs/index.yaml"
+REPO_INDEX="../docs/index.yaml"
 
 helm repo index . --url $RELEASE_PATH/$RELEASE_NAME --merge $REPO_INDEX && \
   mv index.yaml $REPO_INDEX
\ No newline at end of file
diff --git a/helm/values.yaml b/helm/values.yaml
index 571cdd629c382a88ca8d4b22234d2d0907fa5fae..b0ce0faeaa7989872fdedc308d3d4c507894e0e7 100644
--- a/helm/values.yaml
+++ b/helm/values.yaml
@@ -256,11 +256,19 @@ operator:
       image: ghcr.io/cau-se/theodolite-slo-checker-lag-trend
       imageTag: latest
       imagePullPolicy: Always
+    droppedRecordsKStreams:
+      enabled: true
+      image: ghcr.io/cau-se/theodolite-slo-checker-dropped-records-kstreams
+      imageTag: latest
+      imagePullPolicy: Always
 
   resultsVolume:
     enabled: true
-    persistentVolumeClaim:
-      name: theodolite-pv-claim
+    # existingClaim:
+    # storageClassName:
+    accessModes:
+      - ReadWriteOnce
+    size: 1Gi
     accessSidecar:
       enabled: true
       image: busybox
@@ -276,6 +284,9 @@ rbac:
 
 randomScheduler:
   enabled: true
+  image: ghcr.io/cau-se/theodolite-random-scheduler
+  imageTag: latest
+  imagePullPolicy: Always
   rbac:
     create: true
   serviceAccount:
diff --git a/slope-evaluator/Dockerfile b/slo-checker/dropped-records/Dockerfile
similarity index 100%
rename from slope-evaluator/Dockerfile
rename to slo-checker/dropped-records/Dockerfile
diff --git a/slo-checker/dropped-records/README.md b/slo-checker/dropped-records/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3a1ea982a399201143ad50f173c934ff58abbf4a
--- /dev/null
+++ b/slo-checker/dropped-records/README.md
@@ -0,0 +1,80 @@
+# Kafka Streams Dropped Record SLO Evaluator
+
+## Execution
+
+For development:
+
+```sh
+uvicorn main:app --reload  --port 81 # run this command inside the app/ folder
+```
+
+## Build the Docker Image
+
+```sh
+docker build . -t theodolite-evaluator
+```
+
+Run the Docker image:
+
+```sh
+docker run -p 80:81 theodolite-evaluator
+```
+
+## Configuration
+
+You can set the `HOST` and the `PORT` (and many more parameters) via environment variables. The default is `0.0.0.0:80`.
+For more information, see the [Gunicorn/FastAPI Docker docs](https://github.com/tiangolo/uvicorn-gunicorn-fastapi-docker#advanced-usage).
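+
+For example, to serve on a different port (a sketch, relying on the base image's `PORT` environment variable):
+
+```sh
+docker run -p 8080:8081 -e PORT=8081 theodolite-evaluator
+```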
+
+## API Documentation
+
+The running webserver provides a REST API with the following route:
+
+* /dropped-records
+  * Method: POST
+  * Body:
+    * results
+      * metric-metadata
+      * values
+    * metadata
+      * threshold
+      * warmup
+
+The body of the request must be a JSON string that satisfies the following conditions:
+
+* **results**: This property is based on the [Range Vector type](https://www.prometheus.io/docs/prometheus/latest/querying/api/#range-vectors) from Prometheus and must have the following JSON *structure*:
+
+    ```json
+    {
+        "results": [
+            [
+                {
+                    "metric": {
+                        "<label-name>": "<label-value>"
+                    },
+                    "values": [
+                        [
+                            <unix_timestamp>, // 1.634624989695E9
+                            "<sample_value>" // integer
+                        ]
+                    ]
+                }
+            ]
+        ],
+        "metadata": {
+            "threshold": 2000000,
+            "warmup": 60
+        }
+    }
+    ```
+
+### Description
+
+* results:
+  * metric-metadata:
+    * Labels of this metric. The `dropped-records` SLO checker does not use labels in the calculation of the service level objective.
+  * values
+    * The `<unix_timestamp>`, provided as the first element of each entry in the `values` array, must be the timestamp of the measurement in seconds (with optional decimal precision).
+    * The `<sample_value>` must be the measurement value as a string.
+* metadata: Metadata required for the calculation of the service level objective.
+  * **threshold**: Must be an unsigned integer that specifies the threshold for the SLO evaluation. The SLO is considered fulfilled if the result value is below the threshold; if the result value is equal to or above the threshold, the SLO is considered not fulfilled.
+  * **warmup**: Specifies the warmup time in seconds that is ignored when evaluating the SLO.
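+
+For example, the running SLO checker can be queried with `curl` (a sketch, assuming it serves on `localhost:80` and using the provided test resource):
+
+```sh
+curl -X POST http://localhost:80/dropped-records \
+  -H "Content-Type: application/json" \
+  -d @resources/test-1-rep-success.json
+```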
diff --git a/slo-checker/dropped-records/app/main.py b/slo-checker/dropped-records/app/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1577f9c11ed5a2798ee0b4505ae0739301ab2a8
--- /dev/null
+++ b/slo-checker/dropped-records/app/main.py
@@ -0,0 +1,32 @@
+from fastapi import FastAPI, Request
+import logging
+import os
+import json
+import sys
+
+app = FastAPI()
+
+logging.basicConfig(stream=sys.stdout,
+                    format="%(asctime)s %(levelname)s %(name)s: %(message)s")
+logger = logging.getLogger("API")
+
+
+if os.getenv('LOG_LEVEL') == 'INFO':
+    logger.setLevel(logging.INFO)
+elif os.getenv('LOG_LEVEL') == 'WARNING':
+    logger.setLevel(logging.WARNING)
+elif os.getenv('LOG_LEVEL') == 'DEBUG':
+    logger.setLevel(logging.DEBUG)
+
+
+def check_service_level_objective(results, threshold):
+    # The SLO is met if the maximum number of dropped records stays below the threshold
+    return max(results) < threshold
+
+@app.post("/dropped-records", response_model=bool)
+async def evaluate_slope(request: Request):
+    data = json.loads(await request.body())
+    # End of the warmup period: timestamp of the first sample plus the configured warmup duration
+    warmup = int(data['results'][0][0]['values'][0][0]) + int(data['metadata']['warmup'])
+    # Collect all samples recorded after the warmup period; earlier samples are counted as 0
+    results = [int(val[1]) if val[0] >= warmup else 0 for result in data['results'] for r in result for val in r['values']]
+    return check_service_level_objective(results=results, threshold=data['metadata']["threshold"])
+
+logger.info("SLO evaluator is online")
\ No newline at end of file
diff --git a/slo-checker/dropped-records/app/test.py b/slo-checker/dropped-records/app/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c657c914002066357d58d88d7f8e4afe920db45
--- /dev/null
+++ b/slo-checker/dropped-records/app/test.py
@@ -0,0 +1,23 @@
+import unittest
+from main import app, check_service_level_objective
+import json
+from fastapi.testclient import TestClient
+
+class TestSloEvaluation(unittest.TestCase):
+    client = TestClient(app)
+
+    def test_1_rep(self):
+        with open('../resources/test-1-rep-success.json') as json_file:
+            data = json.load(json_file)
+            response = self.client.post("/dropped-records", json=data)
+            self.assertEqual(response.json(), True)
+
+    def test_check_service_level_objective(self):
+        values = list(range(-100, 100))  # avoid shadowing the built-in 'list'
+
+        self.assertEqual(check_service_level_objective(values, 90), False)
+        self.assertEqual(check_service_level_objective(values, 110), True)
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/slope-evaluator/requirements.txt b/slo-checker/dropped-records/requirements.txt
similarity index 100%
rename from slope-evaluator/requirements.txt
rename to slo-checker/dropped-records/requirements.txt
diff --git a/slo-checker/dropped-records/resources/test-1-rep-success.json b/slo-checker/dropped-records/resources/test-1-rep-success.json
new file mode 100644
index 0000000000000000000000000000000000000000..0964c30fed60e34c1ac4cf6b6b89f81d95a2f0eb
--- /dev/null
+++ b/slo-checker/dropped-records/resources/test-1-rep-success.json
@@ -0,0 +1,273 @@
+{
+    "results": [
+        [
+            {
+                "metric": {
+                    "job": "titan-ccp-aggregation"
+                },
+                "values": [
+                    [
+                        1.634624674695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624679695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624684695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624689695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624694695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624699695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624704695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624709695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624714695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624719695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624724695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624729695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624734695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624739695E9,
+                        "0"
+                    ],
+                    [
+                        1.634624744695E9,
+                        "1"
+                    ],
+                    [
+                        1.634624749695E9,
+                        "3"
+                    ],
+                    [
+                        1.634624754695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624759695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624764695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624769695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624774695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624779695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624784695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624789695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624794695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624799695E9,
+                        "4"
+                    ],
+                    [
+                        1.634624804695E9,
+                        "176"
+                    ],
+                    [
+                        1.634624809695E9,
+                        "176"
+                    ],
+                    [
+                        1.634624814695E9,
+                        "176"
+                    ],
+                    [
+                        1.634624819695E9,
+                        "176"
+                    ],
+                    [
+                        1.634624824695E9,
+                        "176"
+                    ],
+                    [
+                        1.634624829695E9,
+                        "159524"
+                    ],
+                    [
+                        1.634624834695E9,
+                        "209870"
+                    ],
+                    [
+                        1.634624839695E9,
+                        "278597"
+                    ],
+                    [
+                        1.634624844695E9,
+                        "460761"
+                    ],
+                    [
+                        1.634624849695E9,
+                        "460761"
+                    ],
+                    [
+                        1.634624854695E9,
+                        "460761"
+                    ],
+                    [
+                        1.634624859695E9,
+                        "460761"
+                    ],
+                    [
+                        1.634624864695E9,
+                        "460761"
+                    ],
+                    [
+                        1.634624869695E9,
+                        "606893"
+                    ],
+                    [
+                        1.634624874695E9,
+                        "653534"
+                    ],
+                    [
+                        1.634624879695E9,
+                        "755796"
+                    ],
+                    [
+                        1.634624884695E9,
+                        "919317"
+                    ],
+                    [
+                        1.634624889695E9,
+                        "919317"
+                    ],
+                    [
+                        1.634624894695E9,
+                        "955926"
+                    ],
+                    [
+                        1.634624899695E9,
+                        "955926"
+                    ],
+                    [
+                        1.634624904695E9,
+                        "955926"
+                    ],
+                    [
+                        1.634624909695E9,
+                        "955926"
+                    ],
+                    [
+                        1.634624914695E9,
+                        "955926"
+                    ],
+                    [
+                        1.634624919695E9,
+                        "1036530"
+                    ],
+                    [
+                        1.634624924695E9,
+                        "1078477"
+                    ],
+                    [
+                        1.634624929695E9,
+                        "1194775"
+                    ],
+                    [
+                        1.634624934695E9,
+                        "1347755"
+                    ],
+                    [
+                        1.634624939695E9,
+                        "1352151"
+                    ],
+                    [
+                        1.634624944695E9,
+                        "1360428"
+                    ],
+                    [
+                        1.634624949695E9,
+                        "1360428"
+                    ],
+                    [
+                        1.634624954695E9,
+                        "1360428"
+                    ],
+                    [
+                        1.634624959695E9,
+                        "1360428"
+                    ],
+                    [
+                        1.634624964695E9,
+                        "1360428"
+                    ],
+                    [
+                        1.634624969695E9,
+                        "1525685"
+                    ],
+                    [
+                        1.634624974695E9,
+                        "1689296"
+                    ],
+                    [
+                        1.634624979695E9,
+                        "1771358"
+                    ],
+                    [
+                        1.634624984695E9,
+                        "1854284"
+                    ],
+                    [
+                        1.634624989695E9,
+                        "1854284"
+                    ]
+                ]
+            }
+        ]
+    ],
+    "metadata": {
+        "threshold": 2000000,
+        "warmup": 60
+    }
+}
\ No newline at end of file
diff --git a/slo-checker/record-lag/Dockerfile b/slo-checker/record-lag/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..032b8153a6989ca04631ba553289dacb3620a38d
--- /dev/null
+++ b/slo-checker/record-lag/Dockerfile
@@ -0,0 +1,6 @@
+FROM tiangolo/uvicorn-gunicorn-fastapi:python3.7
+
+COPY requirements.txt requirements.txt
+RUN pip install -r requirements.txt
+
+COPY ./app /app
\ No newline at end of file
diff --git a/slo-checker/record-lag/README.md b/slo-checker/record-lag/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b4882eeaf54aadfb8cbf33a957e6052a7b74123b
--- /dev/null
+++ b/slo-checker/record-lag/README.md
@@ -0,0 +1,80 @@
+# Lag Trend SLO Evaluator
+
+## Execution
+
+For development:
+
+```sh
+uvicorn main:app --reload # run this command inside the app/ folder
+```
+
+## Build the Docker Image
+
+```sh
+docker build . -t theodolite-evaluator
+```
+
+Run the Docker image:
+
+```sh
+docker run -p 80:80 theodolite-evaluator
+```
+
+## Configuration
+
+You can set the `HOST` and the `PORT` (and many more parameters) via environment variables. The default is `0.0.0.0:80`.
+For more information, see the [Gunicorn/FastAPI Docker docs](https://github.com/tiangolo/uvicorn-gunicorn-fastapi-docker#advanced-usage).
+
+## API Documentation
+
+The running webserver provides a REST API with the following route:
+
+* /evaluate-slope
+  * Method: POST
+  * Body:
+    * results
+      * metric-metadata
+      * values
+    * metadata
+      * threshold
+      * warmup
+
+The body of the request must be a JSON string that satisfies the following conditions:
+
+* **results**: This property is based on the [Range Vector type](https://www.prometheus.io/docs/prometheus/latest/querying/api/#range-vectors) from Prometheus and must have the following JSON *structure*:
+
+    ```json
+    {
+        "results": [
+            [
+                {
+                    "metric": {
+                        "<label-name>": "<label-value>"
+                    },
+                    "values": [
+                        [
+                            <unix_timestamp>, // 1.634624989695E9
+                            "<sample_value>" // integer
+                        ]
+                    ]
+                }
+            ]
+        ],
+        "metadata": {
+            "threshold": 2000000,
+            "warmup": 60
+        }
+    }
+    ```
+
+### Description
+
+* results:
+  * metric-metadata:
+    * Labels of this metric. The lag trend SLO checker does not use labels in the calculation of the service level objective.
+  * values
+    * The `<unix_timestamp>`, provided as the first element of each entry in the `values` array, must be the timestamp of the measurement in seconds (with optional decimal precision).
+    * The `<sample_value>` must be the measurement value as a string.
+* metadata: Metadata required for the calculation of the service level objective.
+  * **threshold**: Must be an unsigned integer that specifies the threshold for the SLO evaluation. The SLO is considered fulfilled if the result value is below the threshold; if the result value is equal to or above the threshold, the SLO is considered not fulfilled.
+  * **warmup**: Specifies the warmup time in seconds that is ignored when evaluating the SLO.
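+
+For example, the running SLO checker can be queried with `curl` (a sketch, assuming it serves on `localhost:80` and using the provided test resource):
+
+```sh
+curl -X POST http://localhost:80/evaluate-slope \
+  -H "Content-Type: application/json" \
+  -d @resources/test-1-rep-success.json
+```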
diff --git a/slope-evaluator/app/main.py b/slo-checker/record-lag/app/main.py
similarity index 90%
rename from slope-evaluator/app/main.py
rename to slo-checker/record-lag/app/main.py
index 6f6788f0ca84b7710be5b509ca4f0641047e963d..621fa0cfc9c27e809fd92752de93f2795fa32c05 100644
--- a/slope-evaluator/app/main.py
+++ b/slo-checker/record-lag/app/main.py
@@ -38,7 +38,7 @@ def calculate_slope_trend(results, warmup):
         err_msg = 'Computing trend slope failed.'
         logger.exception(err_msg)
         logger.error('Mark this subexperiment as not successful and continue benchmark.')
-        return False
+        return float('inf')
 
     logger.info("Computed lag trend slope is '%s'", trend_slope)
     return trend_slope
@@ -49,7 +49,7 @@ def check_service_level_objective(results, threshold):
 @app.post("/evaluate-slope",response_model=bool)
 async def evaluate_slope(request: Request):
     data = json.loads(await request.body())
-    results = [calculate_slope_trend(total_lag, data['warmup']) for total_lag in data['total_lags']]
-    return check_service_level_objective(results=results, threshold=data["threshold"])
+    results = [calculate_slope_trend(total_lag, data['metadata']['warmup']) for total_lag in data['results']]
+    return check_service_level_objective(results=results, threshold=data['metadata']["threshold"])
 
 logger.info("SLO evaluator is online")
\ No newline at end of file
diff --git a/slope-evaluator/app/test.py b/slo-checker/record-lag/app/test.py
similarity index 99%
rename from slope-evaluator/app/test.py
rename to slo-checker/record-lag/app/test.py
index 9b165ea479bb9a552edaba7692df4fd4ef3f4ab4..c8d81f86b16255dcdce5337d8f00e922b98b4f82 100644
--- a/slope-evaluator/app/test.py
+++ b/slo-checker/record-lag/app/test.py
@@ -17,7 +17,7 @@ class TestSloEvaluation(unittest.TestCase):
             data = json.load(json_file)
             response = self.client.post("/evaluate-slope", json=data)
             self.assertEquals(response.json(), True)
-        
+
     def test_check_service_level_objective(self):
         list = [1,2,3,4]
         self.assertEquals(check_service_level_objective(list, 2), False)
diff --git a/slope-evaluator/app/trend_slope_computer.py b/slo-checker/record-lag/app/trend_slope_computer.py
similarity index 100%
rename from slope-evaluator/app/trend_slope_computer.py
rename to slo-checker/record-lag/app/trend_slope_computer.py
diff --git a/slo-checker/record-lag/requirements.txt b/slo-checker/record-lag/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8b6c3863226c2bd5e8bcd7982b2674dee593f192
--- /dev/null
+++ b/slo-checker/record-lag/requirements.txt
@@ -0,0 +1,5 @@
+fastapi==0.65.2
+scikit-learn==0.20.3
+pandas==1.0.3
+uvicorn
+requests
diff --git a/slope-evaluator/resources/test-1-rep-success.json b/slo-checker/record-lag/resources/test-1-rep-success.json
similarity index 97%
rename from slope-evaluator/resources/test-1-rep-success.json
rename to slo-checker/record-lag/resources/test-1-rep-success.json
index 9e315c707be7b2a874c58fcb1093aa86f7676560..dfe11282720ebfcdd60582b7717da892bc85a923 100644
--- a/slope-evaluator/resources/test-1-rep-success.json
+++ b/slo-checker/record-lag/resources/test-1-rep-success.json
@@ -1,5 +1,5 @@
 {
-    "total_lags": [
+    "results": [
         [
             {
                 "metric": {
@@ -134,6 +134,8 @@
             }
         ]
     ],
-    "threshold": 2000,
-    "warmup": 0
+    "metadata": {
+        "threshold": 2000,
+        "warmup": 0
+    }
 }
\ No newline at end of file
diff --git a/slope-evaluator/resources/test-3-rep-success.json b/slo-checker/record-lag/resources/test-3-rep-success.json
similarity index 98%
rename from slope-evaluator/resources/test-3-rep-success.json
rename to slo-checker/record-lag/resources/test-3-rep-success.json
index 485966cba40f01e4a646e626914510ba49b707bc..cf483f42f3783aecd1f428ac7bbbe2090c4cade0 100644
--- a/slope-evaluator/resources/test-3-rep-success.json
+++ b/slo-checker/record-lag/resources/test-3-rep-success.json
@@ -1,5 +1,5 @@
 {
-    "total_lags": [
+    "results": [
         [
             {
                 "metric": {
@@ -284,6 +284,8 @@
             }
         ]
     ],
-    "threshold": 2000,
-    "warmup": 0
+    "metadata": {
+        "threshold": 2000,
+        "warmup": 0
+    }
 }
\ No newline at end of file
diff --git a/slope-evaluator/README.md b/slope-evaluator/README.md
deleted file mode 100644
index cd9e6820ed46452ce44d57d0c7e5cd5ae05e5a3b..0000000000000000000000000000000000000000
--- a/slope-evaluator/README.md
+++ /dev/null
@@ -1,61 +0,0 @@
-# Lag Trend SLO Evaluator
-
-## Execution
-
-For development:
-
-```sh
-uvicorn main:app --reload # run this command inside the app/ folder
-```
-
-## Build the docker image:
-
-```sh
-docker build . -t theodolite-evaluator
-```
-
-Run the Docker image:
-
-```sh
-docker run -p 80:80 theodolite-evaluator
-```
-
-## Configuration
-
-You can set the `HOST` and the `PORT` (and a lot of more parameters) via environment variables. Default is `0.0.0.0:80`.
-For more information see the [Gunicorn/FastAPI Docker docs](https://github.com/tiangolo/uvicorn-gunicorn-fastapi-docker#advanced-usage).
-
-## API Documentation
-
-The running webserver provides a REST API with the following route:
-
-* /evaluate-slope
-    * Method: POST
-    * Body:
-        * total_lags
-        * threshold
-        * warmup
-
-The body of the request must be a JSON string that satisfies the following conditions:
-
-* **total_lag**: This property is based on the [Range Vector type](https://www.prometheus.io/docs/prometheus/latest/querying/api/#range-vectors) from Prometheus and must have the following JSON structure:
-    ```
-        { 
-            [
-                "metric": {
-                    "group": "<label_value>"
-                },
-                "values": [
-                    [
-                        <unix_timestamp>,
-                        "<sample_value>"
-                    ]
-                ]
-            ]
-        }
-    ```
-    * The `<label_value>` provided in "metric.group" must be equal to the id of the Kafka consumer group.
-    * The `<unix_timestamp>` provided as the first element of each element in the "values" array must be the timestamp of the measurement value in seconds (with optional decimal precision)
-    * The `<sample_value>` must be the measurement value as string.
-* **threshold**: Must be an unsigned integer that specifies the threshold for the SLO evaluation. The SLO is considered fulfilled, if the result value is below the threshold. If the result value is equal or above the threshold, the SLO is considered not fulfilled.
-* **warmup**: Specifieds the warmup time in seconds that are ignored for evaluating the SLO.
\ No newline at end of file
diff --git a/theodolite-benchmarks/uc1-beam-flink/src/main/java/application/Uc1ApplicationBeam.java b/theodolite-benchmarks/uc1-beam-flink/src/main/java/application/Uc1ApplicationBeam.java
new file mode 100644
index 0000000000000000000000000000000000000000..b794146c0ae6a1194590fa08562836de1d43c68f
--- /dev/null
+++ b/theodolite-benchmarks/uc1-beam-flink/src/main/java/application/Uc1ApplicationBeam.java
@@ -0,0 +1,115 @@
+package application;
+
+import com.google.gson.Gson;
+import java.util.Properties;
+import org.apache.beam.runners.flink.FlinkRunner;
+import org.apache.beam.sdk.Pipeline;
+import org.apache.beam.sdk.coders.AvroCoder;
+import org.apache.beam.sdk.coders.CoderRegistry;
+import org.apache.beam.sdk.transforms.DoFn;
+import org.apache.beam.sdk.transforms.MapElements;
+import org.apache.beam.sdk.transforms.PTransform;
+import org.apache.beam.sdk.transforms.ParDo;
+import org.apache.beam.sdk.transforms.SimpleFunction;
+import org.apache.beam.sdk.values.KV;
+import org.apache.beam.sdk.values.PBegin;
+import org.apache.beam.sdk.values.PCollection;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import theodolite.commons.beam.AbstractBeamService;
+import theodolite.commons.beam.ConfigurationKeys;
+import theodolite.commons.beam.kafka.KafkaActivePowerRecordReader;
+import titan.ccp.model.records.ActivePowerRecord;
+
+
+/**
+ * Implementation of the use case Database Storage using Apache Beam with the Flink Runner. To
+ * execute locally in standalone mode, start Kafka, Zookeeper, the schema registry and the
+ * workload generator using the delayed_startup.sh script. Start a Flink cluster and pass its
+ * REST address using --flinkMaster as run parameter. To persist logs, add
+ * ${workspace_loc:/uc1-application-samza/eclipseConsoleLogs.log} as Output File under Standard
+ * Input Output in Common in the Run Configuration. Start via Eclipse Run.
+ */
+public final class Uc1ApplicationBeam extends AbstractBeamService {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(Uc1ApplicationBeam.class);
+  private final String inputTopic = CONFIG.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC);
+  private final String bootstrapServer =
+      CONFIG.getString(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS);
+
+  /**
+   * Private constructor setting specific options for this use case.
+   */
+  private Uc1ApplicationBeam(final String[] args) { //NOPMD
+    super(args);
+    LOGGER.info(this.options.toString());
+    this.options.setRunner(FlinkRunner.class);
+  }
+
+  /**
+   * Main method.
+   */
+  @SuppressWarnings({"unchecked", "rawtypes", "unused"})
+  public static void main(final String[] args) {
+
+    final Uc1ApplicationBeam uc1 = new Uc1ApplicationBeam(args);
+
+    // create pipeline
+    final Pipeline pipeline = Pipeline.create(uc1.options);
+
+    // Set Coders for Classes that will be distributed
+    final CoderRegistry cr = pipeline.getCoderRegistry();
+    cr.registerCoderForClass(ActivePowerRecord.class, AvroCoder.of(ActivePowerRecord.SCHEMA$));
+
+    // build KafkaConsumerConfig
+    final Properties consumerConfig = uc1.buildConsumerConfig();
+
+    // Create Pipeline transformations
+    final PTransform<PBegin, PCollection<KV<String, ActivePowerRecord>>> kafka =
+        new KafkaActivePowerRecordReader(uc1.bootstrapServer, uc1.inputTopic, consumerConfig);
+
+    final LogKeyValue logKeyValue = new LogKeyValue();
+
+    // Apply pipeline transformations
+    // Read from Kafka
+    pipeline.apply(kafka)
+        // Map each record to its JSON representation using Gson
+        .apply(MapElements
+            .via(
+                new SimpleFunction<KV<String, ActivePowerRecord>, KV<String, String>>() {
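+                  // Gson is not serializable, so keep the field transient and re-create it after deserialization.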
+                  private transient Gson gsonObj = new Gson();
+
+                  @Override
+                  public KV<String, String> apply(
+                      final KV<String, ActivePowerRecord> kv) {
+                    if (this.gsonObj == null) {
+                      this.gsonObj = new Gson();
+                    }
+                    final String gson = this.gsonObj.toJson(kv.getValue());
+                    return KV.of(kv.getKey(), gson);
+                  }
+                }))
+        // Print to console
+        .apply(ParDo.of(logKeyValue));
+    // Submit job and start execution
+    pipeline.run().waitUntilFinish();
+  }
+
+
+  /**
+   * Logs all key-value pairs.
+   */
+  @SuppressWarnings({"unused"})
+  private static class LogKeyValue extends DoFn<KV<String, String>, KV<String, String>> {
+    private static final long serialVersionUID = 4328743;
+
+    @ProcessElement
+    public void processElement(@Element final KV<String, String> kv,
+                               final OutputReceiver<KV<String, String>> out) {
+      if (LOGGER.isInfoEnabled()) {
+        LOGGER.info("Key: " + kv.getKey() + ", Value: " + kv.getValue());
+      }
+    }
+  }
+}
+
diff --git a/theodolite/src/main/kotlin/theodolite/execution/TheodoliteExecutor.kt b/theodolite/src/main/kotlin/theodolite/execution/TheodoliteExecutor.kt
index f5054dc2d8c3525562118b559ab8987215dc4ea1..addf30acde31ee8e3e53c20a5e2b57a03587d08e 100644
--- a/theodolite/src/main/kotlin/theodolite/execution/TheodoliteExecutor.kt
+++ b/theodolite/src/main/kotlin/theodolite/execution/TheodoliteExecutor.kt
@@ -115,10 +115,10 @@ class TheodoliteExecutor(
         val ioHandler = IOHandler()
         val resultsFolder = ioHandler.getResultFolderURL()
         this.config.executionId = getAndIncrementExecutionID(resultsFolder + "expID.txt")
-        ioHandler.writeToJSONFile(this.config, "$resultsFolder${this.config.executionId}-execution-configuration")
+        ioHandler.writeToJSONFile(this.config, "${resultsFolder}exp${this.config.executionId}-execution-configuration")
         ioHandler.writeToJSONFile(
             kubernetesBenchmark,
-            "$resultsFolder${this.config.executionId}-benchmark-configuration"
+            "${resultsFolder}exp${this.config.executionId}-benchmark-configuration"
         )
 
         val config = buildConfig()
@@ -130,7 +130,7 @@ class TheodoliteExecutor(
         }
         ioHandler.writeToJSONFile(
             config.compositeStrategy.benchmarkExecutor.results,
-            "$resultsFolder${this.config.executionId}-result"
+            "${resultsFolder}exp${this.config.executionId}-result"
         )
     }