diff --git a/analysis/README.md b/analysis/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5318425825a51b4ab118bb1c6c11dfc92037c6a1
--- /dev/null
+++ b/analysis/README.md
@@ -0,0 +1,22 @@
+# Theodolite Analysis
+
+This directory contains Jupyter notebooks for analyzing and visualizing
+benchmark execution results. The following notebooks are provided:
+
+* [scalability-graph.ipynb](scalability-graph.ipynb): Creates a scalability graph for a single benchmark execution.
+* [scalability-graph-finish.ipynb](scalability-graph-finish.ipynb): Combines the scalability graphs of multiple benchmark executions (e.g., for comparing different configurations).
+* [lag-trend-graph.ipynb](lag-trend-graph.ipynb): Visualizes the consumer lag evaluation over time along with the computed trend.
+
+## Usage
+
+For analyzing and visualizing benchmark results, a **Python 3.7**
+installation is required (e.g., in a virtual environment). Our notebooks require some
+Python libraries, which can be installed via:
+
+```sh
+pip install -r requirements.txt 
+```
+
+We have tested these notebooks with
+[Visual Studio Code](https://code.visualstudio.com/docs/python/jupyter-support);
+however, any other Jupyter environment should work as well.
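+
+For example, once the requirements are installed, a local Jupyter server can
+be started from within this directory via:
+
+```sh
+jupyter notebook
+```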
diff --git a/execution/lag-trend-graph.ipynb b/analysis/lag-trend-graph.ipynb
similarity index 91%
rename from execution/lag-trend-graph.ipynb
rename to analysis/lag-trend-graph.ipynb
index 71cd54ceefbcce4548e118a9dd0ab484df52a207..4e574ceb6a6273a7299bb50d9e81598002c330f5 100644
--- a/execution/lag-trend-graph.ipynb
+++ b/analysis/lag-trend-graph.ipynb
@@ -20,8 +20,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "directory = ''\n",
-    "filename = 'xxx_totallag.csv'\n",
+    "directory = '<path-to>/results'\n",
+    "#filename = 'exp1002_uc3_75000_1_totallag.csv'\n",
+    "filename = 'exp1002_uc3_50000_2_totallag.csv'\n",
     "warmup_sec = 60\n",
     "threshold = 2000 #slope"
    ]
@@ -105,20 +106,6 @@
     "\n",
     "plt.savefig(\"plot.pdf\", bbox_inches='tight')\n"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
@@ -138,7 +125,7 @@
   "pygments_lexer": "ipython3",
   "version": 3,
   "kernelspec": {
-   "name": "python37064bitvenvvenv469ea2e0a7854dc7b367eee45386afee",
+   "name": "python37064bitvenvvenv21b61136d7f443749f2918b47e00d223",
    "display_name": "Python 3.7.0 64-bit ('.venv': venv)"
   }
  },
diff --git a/analysis/requirements.txt b/analysis/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c97a862620dfc9cd9602fe02e420752b077c6c0a
--- /dev/null
+++ b/analysis/requirements.txt
@@ -0,0 +1,4 @@
+jupyter==1.0.0
+matplotlib==3.2.0
+pandas==1.0.1
+scikit-learn==0.22.2.post1
\ No newline at end of file
diff --git a/execution/scalability-graph-finish.ipynb b/analysis/scalability-graph-finish.ipynb
similarity index 98%
rename from execution/scalability-graph-finish.ipynb
rename to analysis/scalability-graph-finish.ipynb
index ffcf33b6b044a7f5f354b682a5cafc3c3f42e2f0..8cadff0daee03f0ed0c2fa0ac0c7c72b462f340d 100644
--- a/execution/scalability-graph-finish.ipynb
+++ b/analysis/scalability-graph-finish.ipynb
@@ -18,7 +18,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "directory = '../results-inst'\n",
+    "directory = '<path-to>/results-inst'\n",
     "\n",
     "experiments = {\n",
     "    'exp1003': 'exp1003',\n",
diff --git a/execution/scalability-graph.ipynb b/analysis/scalability-graph.ipynb
similarity index 95%
rename from execution/scalability-graph.ipynb
rename to analysis/scalability-graph.ipynb
index 752c0bebc901e756e18d4b11fc0d8ae02cddcf13..868f950dfea091b8fd6dbc78dc4b7471086c8947 100644
--- a/execution/scalability-graph.ipynb
+++ b/analysis/scalability-graph.ipynb
@@ -16,7 +16,6 @@
    "outputs": [],
    "source": [
     "import os\n",
-    "import requests\n",
     "from datetime import datetime, timedelta, timezone\n",
     "import pandas as pd\n",
     "from sklearn.linear_model import LinearRegression\n",
@@ -38,11 +37,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "exp_id = 1003\n",
+    "exp_id = 2012\n",
     "warmup_sec = 60\n",
     "warmup_partitions_sec = 120\n",
     "threshold = 2000 #slope\n",
-    "directory = '../results'\n"
+    "#directory = '../results'\n",
+    "directory = '<path-to>/results'\n",
+    "directory_out = '<path-to>/results-inst'\n"
    ]
   },
   {
@@ -244,7 +245,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "min_suitable_instances.to_csv(f'../results-inst/exp{exp_id}_min-suitable-instances.csv', index=False)"
+    "min_suitable_instances.to_csv(os.path.join(directory_out, f'exp{exp_id}_min-suitable-instances.csv'), index=False)"
    ]
   },
   {
@@ -284,7 +285,7 @@
   "pygments_lexer": "ipython3",
   "version": 3,
   "kernelspec": {
-   "name": "python37064bitvenvvenv469ea2e0a7854dc7b367eee45386afee",
+   "name": "python37064bitvenvvenv6c432ee1239d4f3cb23f871068b0267d",
    "display_name": "Python 3.7.0 64-bit ('.venv': venv)"
   }
  },
diff --git a/execution/README.md b/execution/README.md
index 56cc6dce6bad80facef5ffe9183cabe9ca8373b0..7dd9bf5d13c1eb578d67f03aefdbb7a47558e533 100644
--- a/execution/README.md
+++ b/execution/README.md
@@ -1,13 +1,20 @@
-# Requirements
+# Theodolite Execution Framework
 
+This directory contains the Theodolite framework for executing scalability
+benchmarks in a Kubernetes cluster. As Theodolite aims to execute benchmarks
+in realistic execution environments, some third-party components are [required](#requirements).
+After everything is installed and configured, you can move on to the [execution of
+benchmarks](#execution).
 
-## Kubernetes Cluster
+## Requirements
+
+### Kubernetes Cluster
 
 For executing benchmarks, access to a Kubernetes cluster is required. We suggest
 to create a dedicated namespace for executing our benchmarks. The following
 services need to be available as well.
 
-### Prometheus
+#### Prometheus
 
 We suggest to use the [Prometheus Operator](https://github.com/coreos/prometheus-operator)
 and create a dedicated Prometheus instance for these benchmarks.
@@ -34,7 +41,7 @@ depending on your cluster's security policies.
 For the individual benchmarking components to be monitored, [ServiceMonitors](https://github.com/coreos/prometheus-operator#customresourcedefinitions)
 are used. See the corresponding sections below for how to install them.
 
-### Grafana
+#### Grafana
 
 As with Prometheus, we suggest to create a dedicated Grafana instance. Grafana
 with our default configuration can be installed with Helm:
@@ -60,7 +67,7 @@ Create the Configmap for the data source:
 kubectl apply -f infrastructure/grafana/prometheus-datasource-config-map.yaml
 ```
 
-### A Kafka cluster
+#### A Kafka cluster
 
 One possible way to set up a Kafka cluster is via [Confluent's Helm Charts](https://github.com/confluentinc/cp-helm-charts).
 For using these Helm charts in conjunction with the Prometheus Operator (see
@@ -68,7 +75,7 @@ below), we provide a [patch](https://github.com/SoerenHenning/cp-helm-charts)
 for these helm charts. Note that this patch is only required for observation and
 not for the actual benchmark execution and evaluation.
 
-#### Our patched Confluent Helm Charts
+##### Our patched Confluent Helm Charts
 
 To use our patched Confluent Helm Charts, clone the
 [chart's repository](https://github.com/SoerenHenning/cp-helm-charts). We also
@@ -86,11 +93,11 @@ To let Prometheus scrape Kafka metrics, deploy a ServiceMonitor:
 kubectl apply -f infrastructure/kafka/service-monitor.yaml
 ```
 
-#### Other options for Kafka
+##### Other options for Kafka
 
 Other Kafka deployments, for example, using Strimzi, should work in a similar way.
 
-### The Kafka Lag Exporter
+#### The Kafka Lag Exporter
 
 [Lightbend's Kafka Lag Exporter](https://github.com/lightbend/kafka-lag-exporter)
 can be installed via Helm. We also provide a [default configuration](infrastructure/kafka-lag-exporter/values.yaml).
@@ -107,21 +114,19 @@ kubectl apply -f infrastructure/kafka-lag-exporter/service-monitor.yaml
 ```
 
 
-## Python 3.7
-
-For executing benchmarks and analyzing their results, a **Python 3.7** installation
-is required. We suggest to use a virtual environment placed in the `.venv` directory.
+### Python 3.7
 
-As set of requirements is needed for the analysis Jupyter notebooks and the
-execution tool. You can install them with the following command (make sure to
-be in your virtual environment if you use one):
+For executing benchmarks, a **Python 3.7** installation is required. We suggest
+to use a virtual environment placed in the `.venv` directory (in the Theodolite
+root directory). The required Python libraries can be installed with the
+following command (make sure to be in your virtual environment if you use one):
 
 ```sh
 pip install -r requirements.txt 
 ```
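+
+If you do not have a virtual environment yet, it can, for example, be created
+and activated as follows (a sketch, assuming a `python3.7` binary is available
+on your `PATH`):
+
+```sh
+python3.7 -m venv .venv      # create the virtual environment in .venv
+source .venv/bin/activate    # activate it before installing the requirements
+```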
 
 
-## Required Manual Adjustments
+### Required Manual Adjustments
 
 Depending on your setup, some additional adjustments may be necessary:
 
@@ -133,7 +138,7 @@ Depending on your setup, some additional adjustments may be necessary:
 
 
 
-# Execution
+## Execution
 
 The `./theodolite.sh` script is the entrypoint for all benchmark executions. It has to be called as follows:
 
diff --git a/execution/requirements.txt b/execution/requirements.txt
index 17f29b0b16a3f130399612c7bffd3ce12896c946..7224efe80aa1686bb3de90b2beac5df47a56ed8f 100644
--- a/execution/requirements.txt
+++ b/execution/requirements.txt
@@ -1,62 +1,4 @@
-attrs==19.3.0
-backcall==0.1.0
-bleach==3.1.1
-certifi==2019.11.28
-chardet==3.0.4
-cycler==0.10.0
-decorator==4.4.2
-defusedxml==0.6.0
-entrypoints==0.3
-idna==2.9
-importlib-metadata==1.5.0
-ipykernel==5.1.4
-ipython==7.13.0
-ipython-genutils==0.2.0
-ipywidgets==7.5.1
-jedi==0.16.0
-Jinja2==2.11.1
-joblib==0.14.1
-jsonschema==3.2.0
-jupyter==1.0.0
-jupyter-client==6.0.0
-jupyter-console==6.1.0
-jupyter-core==4.6.3
-kiwisolver==1.1.0
-MarkupSafe==1.1.1
 matplotlib==3.2.0
-mistune==0.8.4
-nbconvert==5.6.1
-nbformat==5.0.4
-notebook==6.0.3
-numpy==1.18.1
 pandas==1.0.1
-pandocfilters==1.4.2
-parso==0.6.2
-pexpect==4.8.0
-pickleshare==0.7.5
-prometheus-client==0.7.1
-prompt-toolkit==3.0.4
-ptyprocess==0.6.0
-Pygments==2.6.1
-pyparsing==2.4.6
-pyrsistent==0.15.7
-python-dateutil==2.8.1
-pytz==2019.3
-pyzmq==19.0.0
-qtconsole==4.7.1
-QtPy==1.9.0
 requests==2.23.0
-scikit-learn==0.22.2.post1
-scipy==1.4.1
-Send2Trash==1.5.0
-six==1.14.0
-sklearn==0.0
-terminado==0.8.3
-testpath==0.4.4
-tornado==6.0.4
-traitlets==4.3.3
-urllib3==1.25.8
-wcwidth==0.1.8
-webencodings==0.5.1
-widgetsnbextension==3.5.1
-zipp==3.1.0
+scikit-learn==0.22.2.post1
\ No newline at end of file