diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index dd6b13e2b4a99b1dca8fec330f7ff618eff87ce2..c9f923b4eb3376d3910a1b3b26dfac2b75b16692 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -37,10 +37,11 @@ lint-helm:
     GRADLE_OPTS: "-Dorg.gradle.daemon=false"
   cache:
     paths:
-      - .gradle
+      - .gradle/wrapper
+      - .gradle/caches
   before_script:
-    - cd theodolite-benchmarks
     - export GRADLE_USER_HOME=`pwd`/.gradle
+    - cd theodolite-benchmarks
 
 build-benchmarks:
   stage: build
@@ -212,13 +213,86 @@ deploy-uc4-load-generator:
 
 # Theodolite Framework
 
+.theodolite:
+  image:
+    name: ghcr.io/graalvm/native-image:java11-21.1.0
+    entrypoint: [""]
+  tags:
+    - exec-docker
+  variables:
+    GRADLE_OPTS: "-Dorg.gradle.daemon=false"
+  cache:
+    paths:
+      - .gradle/wrapper
+      - .gradle/caches
+  before_script:
+    - export GRADLE_USER_HOME=`pwd`/.gradle
+    - cd theodolite-quarkus
+
+build-theodolite-jvm:
+  stage: build
+  extends: .theodolite
+  script: ./gradlew --build-cache assemble
+  artifacts:
+    paths:
+      - "theodolite-quarkus/build/lib/*"
+      - "theodolite-quarkus/build/*-runner.jar"
+    expire_in: 6 hours
+
+build-theodolite-native:
+  stage: build
+  extends: .theodolite
+  script:
+    - ./gradlew --build-cache assemble -Dquarkus.package.type=native
+  when: manual
+  artifacts:
+    paths:
+      - "theodolite-quarkus/build/*-runner"
+    expire_in: 6 hours
+
+test-theodolite:
+  stage: test
+  extends: .theodolite
+  needs:
+    - build-theodolite-jvm
+    #- build-theodolite-native
+  script: ./gradlew test --stacktrace
+  artifacts:
+    reports:
+      junit:
+        - "theodolite-quarkus/**/build/test-results/test/TEST-*.xml"
+
+# Disabled for now
+.ktlint-theodolite:
+  stage: check
+  extends: .theodolite
+  needs:
+    - build-theodolite-jvm
+    - test-theodolite
+  script: ./gradlew ktlintCheck --continue
+
+# Disabled for now
+.detekt-theodolite: 
+  stage: check
+  extends: .theodolite
+  needs:
+    - build-theodolite-jvm
+    - test-theodolite
+  script: ./gradlew detekt --continue
+
 deploy-theodolite:
   stage: deploy
   extends:
+    - .theodolite
     - .dind
+  needs:
+    #- build-theodolite-native
+    - build-theodolite-jvm
+    - test-theodolite
   script:
     - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
-    - docker build --pull -t theodolite ./execution
+    #- docker build -f src/main/docker/Dockerfile.native -t theodolite .
+    - docker build -f src/main/docker/Dockerfile.jvm -t theodolite .
     - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite $CR_HOST/$CR_ORG/theodolite:${DOCKER_TAG_NAME}latest"
     - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite $CR_HOST/$CR_ORG/theodolite:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
     - "[ $CI_COMMIT_TAG ] && docker tag theodolite $CR_HOST/$CR_ORG/theodolite:$CI_COMMIT_TAG"
@@ -229,7 +303,46 @@ deploy-theodolite:
     - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $CI_COMMIT_TAG"
       when: always
     - changes:
-      - execution/**/*
+      - theodolite-quarkus/**/*
+      if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
+      when: always
+    - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
+      when: manual
+      allow_failure: true
+
+
+# Theodolite SLO Checker: Lag Trend
+
+test-slo-checker-lag-trend:
+  stage: test
+  image: python:3.7-slim
+  tags:
+    - exec-docker
+  script:
+    - cd slope-evaluator
+    - pip install -r requirements.txt
+    - cd app
+    - python -m unittest
+
+deploy-slo-checker-lag-trend:
+  stage: deploy
+  extends:
+    - .dind
+  needs:
+    - test-slo-checker-lag-trend
+  script:
+    - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
+    - docker build --pull -t theodolite-slo-checker-lag-trend slope-evaluator
+    - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite-slo-checker-lag-trend $CR_HOST/$CR_ORG/theodolite-slo-checker-lag-trend:${DOCKER_TAG_NAME}latest"
+    - "[ $CI_COMMIT_TAG ] && docker tag theodolite-slo-checker-lag-trend $CR_HOST/$CR_ORG/theodolite-slo-checker-lag-trend:$CI_COMMIT_TAG"
+    - echo $CR_PW | docker login $CR_HOST -u $CR_USER --password-stdin
+    - docker push $CR_HOST/$CR_ORG/theodolite-slo-checker-lag-trend
+    - docker logout
+  rules:
+    - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $CI_COMMIT_TAG"
+      when: always
+    - changes:
+      - slope-evaluator/**/*
       if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
       when: always
     - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
@@ -243,6 +356,7 @@ deploy-random-scheduler:
   stage: deploy
   extends:
     - .dind
+  needs: []
   script:
     - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
     - docker build --pull -t theodolite-random-scheduler execution/infrastructure/random-scheduler
diff --git a/execution/Dockerfile b/execution/Dockerfile
deleted file mode 100644
index e71bc91d9d31bea4c1598292e43d0ab7c193c3fa..0000000000000000000000000000000000000000
--- a/execution/Dockerfile
+++ /dev/null
@@ -1,15 +0,0 @@
-FROM python:3.8
-
-RUN mkdir /app
-WORKDIR /app
-ADD requirements.txt /app/
-RUN pip install -r requirements.txt
-COPY uc-workload-generator /app/uc-workload-generator
-COPY uc-application /app/uc-application
-COPY strategies /app/strategies
-COPY lib /app/lib
-COPY lag_analysis.py /app/
-COPY run_uc.py /app/
-COPY theodolite.py /app/
-
-CMD ["python", "/app/theodolite.py"]
diff --git a/execution/README.md b/execution/README.md
index ca15111c0ad7000a200c0c50427a2c2aeb75e093..eb6ade9f2aab28b6d237e9622f22da0ea5998a50 100644
--- a/execution/README.md
+++ b/execution/README.md
@@ -225,7 +225,17 @@ Theodolite locally on your machine see the description below.
 see the [Configuration](#configuration) section below. Note, that you might uncomment the `serviceAccountName` line if
 RBAC is enabled on your cluster (see installation of [Theodolite RBAC](#Theodolite-RBAC)).
 
-To start the execution of a benchmark run (with `<your-theodolite-yaml>` being your job definition):
+To start the execution of a benchmark, create a ConfigMap that contains all required Kubernetes resource files for the SUT and the load generator, a ConfigMap for the execution, and a ConfigMap for the benchmark:
+
+```sh
+kubectl create configmap app-resources-configmap --from-file=<folder-with-all-required-k8s-resources>
+kubectl create configmap execution-configmap --from-file=<execution.yaml>
+kubectl create configmap benchmark-configmap --from-file=<benchmark.yaml>
+```
+
+This will create three ConfigMaps. You can verify this via `kubectl get configmaps`.
+
+Start the Theodolite job (with `<your-theodolite-yaml>` being your job definition):
 
 ```sh
 kubectl create -f <your-theodolite-yaml>
@@ -241,24 +251,7 @@ Kubernetes volume.
 
 ### Configuration
 
-| Command line         | Kubernetes          | Description                                                  |
-| -------------------- | ------------------- | ------------------------------------------------------------ |
-| --uc                 | UC                  | **[Mandatory]** Stream processing use case to be benchmarked. Has to be one of `1`, `2`, `3` or `4`. |
-| --loads              | LOADS               | **[Mandatory]** Values for the workload generator to be tested, should be sorted in ascending order. |
-| --instances          | INSTANCES           | **[Mandatory]** Numbers of instances to be benchmarked, should be sorted in ascending order. |
-| --duration           | DURATION            | Duration in minutes subexperiments should be executed for. *Default:* `5`. |
-| --partitions         | PARTITIONS          | Number of partitions for Kafka topics. *Default:* `40`.      |
-| --cpu-limit          | CPU_LIMIT           | Kubernetes CPU limit for a single Pod.  *Default:* `1000m`.  |
-| --memory-limit       | MEMORY_LIMIT        | Kubernetes memory limit for a single Pod. *Default:* `4Gi`.  |
-| --domain-restriction | DOMAIN_RESTRICTION  | A flag that indiciates domain restriction should be used. *Default:* not set. For more details see Section [Domain Restriction](#domain-restriction). |
-| --search-strategy    | SEARCH_STRATEGY     | The benchmarking search strategy. Can be set to `check-all`, `linear-search` or `binary-search`. *Default:* `check-all`. For more details see Section [Benchmarking Search Strategies](#benchmarking-search-strategies). |
-| --reset              | RESET               | Resets the environment before each subexperiment. Useful if execution was aborted and just one experiment should be executed. |
-| --reset-only         | RESET_ONLY          | Only resets the environment. Ignores all other parameters. Useful if execution was aborted and one want a clean state for new executions. |
-| --namespace          | NAMESPACE        | Kubernetes namespace. *Default:* `default`.  |
-| --prometheus         | PROMETHEUS_BASE_URL | Defines where to find the prometheus instance. *Default:* `http://localhost:9090` |
-| --path               | RESULT_PATH         | A directory path for the results. Relative to the Execution folder. *Default:* `results` |
-| --configurations     | CONFIGURATIONS      | Defines environment variables for the use cases and, thus, enables further configuration options. |
-| --threshold          | THRESHOLD           | The threshold for the trend slop that the search strategies use to determine that a load could be handled. *Default:* `2000` |
+Make sure that the names of the ConfigMaps correspond to the `configMaps`, `volumes`, and `mountPath` specifications of the mounts in the job definition. In particular, the names of the execution file and the benchmark file must match the values of the corresponding environment variables.
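+
+The following is only an illustrative sketch of how these pieces could fit together in a job definition; the environment variable name, container name, and mount path are hypothetical placeholders, so adapt them to your actual `<your-theodolite-yaml>`.
+
+```yaml
+# Hypothetical excerpt of the pod spec in <your-theodolite-yaml>; all names are placeholders.
+spec:
+  containers:
+    - name: theodolite
+      env:
+        - name: EXECUTION_FILE                         # placeholder; use the variable your job definition expects
+          value: execution.yaml                        # must equal the file name stored in execution-configmap
+      volumeMounts:
+        - name: benchmark-resources
+          mountPath: /deployments/benchmark-resources  # placeholder mount path
+  volumes:
+    - name: benchmark-resources
+      configMap:
+        name: app-resources-configmap                  # must match the ConfigMap created above
+```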
 
 ### Domain Restriction
 
diff --git a/execution/helm/.gitignore b/execution/helm/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..80bf7fc709ac6d08e703fe9f24d7d5776e26830e
--- /dev/null
+++ b/execution/helm/.gitignore
@@ -0,0 +1 @@
+charts
\ No newline at end of file
diff --git a/execution/helm/README.md b/execution/helm/README.md
index 4cacd06c8181970e78cb4f62e93b77fa169fcdfa..c545804aaec8eb8ed91054f1f7ee97dd293816a4 100644
--- a/execution/helm/README.md
+++ b/execution/helm/README.md
@@ -6,11 +6,9 @@ Install the chart via:
 
 ```sh
 helm dependencies update .
-helm install my-confluent .
+helm install theodolite .
 ```
 
-**Please note: Theodolite currently uses hard-coded URLs, to connect to Kafka and Zookeeper. For that reason, the name of this chart must be `my-confluent`.** We will change this behavior soon.
-
 This chart installs requirements to execute benchmarks with Theodolite.
 
 Dependencies and subcharts:
@@ -27,7 +25,7 @@ Dependencies and subcharts:
 Test the installation:
 
 ```sh
-helm test <release-name>
+helm test theodolite
 ```
 
 Our test files are located [here](templates/../../theodolite-chart/templates/tests). Many subcharts have their own tests, these are also executed and are placed in the respective /templates folders. 
@@ -44,6 +42,31 @@ In development environments Kubernetes resources are often low. To reduce resour
 helm install theodolite . -f preconfigs/one-broker-values.yaml
 ```
 
+## Uninstall this Chart
+
+To uninstall/delete the `theodolite` deployment:
+
+```sh
+helm delete theodolite
+```
+
+This command does not remove the CRDs which are created by this chart. Remove them manually with:
+
+```sh
+# CRDs from Theodolite
+kubectl delete crd executions.theodolite.com
+kubectl delete crd benchmarks.theodolite.com
+# CRDs from Prometheus operator (see https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#uninstall-chart)
+kubectl delete crd alertmanagerconfigs.monitoring.coreos.com
+kubectl delete crd alertmanagers.monitoring.coreos.com
+kubectl delete crd podmonitors.monitoring.coreos.com
+kubectl delete crd probes.monitoring.coreos.com
+kubectl delete crd prometheuses.monitoring.coreos.com
+kubectl delete crd prometheusrules.monitoring.coreos.com
+kubectl delete crd servicemonitors.monitoring.coreos.com
+kubectl delete crd thanosrulers.monitoring.coreos.com
+```
+
 ## Development
 
 **Hints**:
diff --git a/execution/helm/preconfigs/one-broker-values.yaml b/execution/helm/preconfigs/one-broker-values.yaml
index fdbc3207ee37f49cf176645851d91e62ba354d28..c53c1f1eb8bc7a17f192d70a6f10f8cacc09c98f 100644
--- a/execution/helm/preconfigs/one-broker-values.yaml
+++ b/execution/helm/preconfigs/one-broker-values.yaml
@@ -9,7 +9,7 @@ cp-helm-charts:
   ## Kafka
   ## ------------------------------------------------------
     cp-kafka:
-        brokers: 1 # deauflt: 10
+        brokers: 1 # default: 10
 
         configurationOverrides:
           offsets.topic.replication.factor: "1"
\ No newline at end of file
diff --git a/execution/helm/templates/grafana/dashboard-config-map.yaml b/execution/helm/templates/grafana/dashboard-config-map.yaml
index 1125d7833cc62e78c049436f38b854d926e2a216..41365e5efefaddc92a9f2f25f867a9d895e4ca3d 100644
--- a/execution/helm/templates/grafana/dashboard-config-map.yaml
+++ b/execution/helm/templates/grafana/dashboard-config-map.yaml
@@ -253,7 +253,7 @@ data:
         "steppedLine": false,
         "targets": [
           {
-            "expr": "sum by(group, topic) (kafka_consumergroup_group_lag > 0)",
+            "expr": "sum by(group, topic) (kafka_consumergroup_group_lag >= 0)",
             "format": "time_series",
             "intervalFactor": 1,
             "legendFormat": "{{topic}}",
@@ -436,7 +436,7 @@ data:
         "steppedLine": false,
         "targets": [
           {
-            "expr": "sum by(group,topic) (kafka_consumergroup_group_offset > 0)",
+            "expr": "sum by(group,topic) (kafka_consumergroup_group_offset >= 0)",
             "format": "time_series",
             "intervalFactor": 1,
             "legendFormat": "{{topic}}",
@@ -527,7 +527,7 @@ data:
         "steppedLine": false,
         "targets": [
           {
-            "expr": "count by(group,topic) (kafka_consumergroup_group_offset > 0)",
+            "expr": "count by(group,topic) (kafka_consumergroup_group_offset >= 0)",
             "format": "time_series",
             "intervalFactor": 1,
             "legendFormat": "{{topic}}",
@@ -892,7 +892,7 @@ data:
         "steppedLine": false,
         "targets": [
           {
-            "expr": "sum by(group) (kafka_consumergroup_group_lag > 0)",
+            "expr": "sum by(group) (kafka_consumergroup_group_lag >= 0)",
             "format": "time_series",
             "intervalFactor": 1,
             "legendFormat": "total lag",
diff --git a/execution/helm/templates/theodolite/crd-benchmark.yaml b/execution/helm/templates/theodolite/crd-benchmark.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..084480e1f9e2ef827fb145cd823bbd2f68a20bac
--- /dev/null
+++ b/execution/helm/templates/theodolite/crd-benchmark.yaml
@@ -0,0 +1,119 @@
+{{- if .Values.operator.benchmarkCRD.create -}}
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: benchmarks.theodolite.com
+spec:
+  group: theodolite.com
+  names:
+    kind: benchmark
+    plural: benchmarks
+    shortNames:
+      - bench
+  versions:
+  - name: v1
+    served: true
+    storage: true
+    schema:
+      openAPIV3Schema:
+        type: object
+        required: ["spec"]
+        properties:
+          spec:
+            type: object
+            required: []
+            properties:
+              name:
+                type: string
+              appResource:
+                type: array
+                minItems: 1
+                items:
+                  type: string
+              loadGenResource:
+                type: array
+                minItems: 1
+                items:
+                  type: string
+              resourceTypes:
+                type: array
+                minItems: 1
+                items:
+                  type: object
+                  properties:
+                    typeName:
+                      type: string
+                    patchers:
+                      type: array
+                      minItems: 1
+                      items:
+                        type: object
+                        properties:
+                          type:
+                            type: string
+                            default: ""
+                          resource:
+                            type: string
+                            default: ""
+                          properties:
+                            type: object
+                            additionalProperties: true
+                            x-kubernetes-map-type: "granular"
+                            default: {}
+              loadTypes:
+                type: array
+                minItems: 1
+                items:
+                  type: object
+                  properties:
+                    typeName:
+                      type: string
+                    patchers:
+                      type: array
+                      minItems: 1
+                      items:
+                        type: object
+                        properties:
+                          type:
+                            type: string
+                            default: ""
+                          resource:
+                            type: string
+                            default: ""
+                          properties:
+                            type: object
+                            additionalProperties: true
+                            x-kubernetes-map-type: "granular"
+                            default: {}
+              kafkaConfig:
+                type: object
+                properties:
+                  bootstrapServer:
+                    type: string
+                  topics:
+                    type: array
+                    minItems: 1
+                    items:
+                      type: object
+                      required: []
+                      properties:
+                        name:
+                          type: string
+                          default: ""
+                        numPartitions:
+                          type: integer
+                          default: 0
+                        replicationFactor:
+                          type: integer
+                          default: 0
+                        removeOnly:
+                          type: boolean
+                          default: false
+    additionalPrinterColumns:
+    - name: Age
+      type: date
+      jsonPath: .metadata.creationTimestamp
+    subresources:
+      status: {}
+  scope: Namespaced
+{{- end }}
diff --git a/execution/helm/templates/theodolite/crd-execution.yaml b/execution/helm/templates/theodolite/crd-execution.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d9212e0e0576bb54ffcaf51a227e47f42894a742
--- /dev/null
+++ b/execution/helm/templates/theodolite/crd-execution.yaml
@@ -0,0 +1,130 @@
+{{- if .Values.operator.executionCRD.create -}}
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: executions.theodolite.com
+spec:
+  group: theodolite.com
+  names:
+    kind: execution
+    plural: executions
+    shortNames:
+      - exec
+  versions:
+  - name: v1
+    served: true
+    storage: true
+    schema:
+      openAPIV3Schema:
+        type: object
+        required: ["spec"]
+        properties:
+          spec:
+            type: object
+            required: ["benchmark", "load", "resources", "slos", "execution", "configOverrides"]
+            properties:
+              name:
+                type: string
+                default: ""
+              benchmark:
+                type: string
+              load: # definition of the load dimension
+                type: object
+                required: ["loadType", "loadValues"]
+                properties:
+                  loadType:
+                   type: string
+                  loadValues:
+                    type: array
+                    items:
+                      type: integer
+              resources: # definition of the resource dimension
+                type: object
+                required: ["resourceType", "resourceValues"]
+                properties:
+                  resourceType:
+                    type: string
+                  resourceValues:
+                    type: array
+                    items:
+                      type: integer
+              slos: # def of service level objectives
+                type: array
+                items:
+                  type: object
+                  required: ["sloType", "threshold", "prometheusUrl", "externalSloUrl", "offset", "warmup"]
+                  properties:
+                    sloType:
+                      type: string
+                    threshold:
+                      type: integer
+                    prometheusUrl:
+                      type: string
+                    externalSloUrl:
+                      type: string
+                    offset:
+                      type: integer
+                    warmup:
+                      type: integer
+              execution: # def execution config
+                type: object
+                required: ["strategy", "duration", "repetitions", "restrictions"]
+                properties:
+                  strategy:
+                    type: string
+                  duration:
+                    type: integer
+                  repetitions:
+                    type: integer
+                  loadGenerationDelay:
+                    type: integer
+                  restrictions:
+                    type: array
+                    items:
+                      type: string
+              configOverrides:
+                type: array
+                items:
+                  type: object
+                  properties:
+                    patcher:
+                      type: object
+                      properties:
+                        type:
+                          type: string
+                          default: ""
+                        resource:
+                          type: string
+                          default: ""
+                        properties:
+                          type: object
+                          additionalProperties: true
+                          x-kubernetes-map-type: "granular"
+                          default: {}
+                    value:
+                      type: string
+          status:
+            type: object
+            properties:
+              executionState:
+                description: ""
+                type: string
+              executionDuration:
+                description: "Duration of the execution in seconds"
+                type: string
+    additionalPrinterColumns:
+    - name: STATUS
+      type: string
+      description: State of the execution
+      jsonPath: .status.executionState
+    - name: Duration
+      type: string
+      description: Duration of the execution
+      jsonPath: .status.executionDuration
+    - name: Age
+      type: date
+      jsonPath: .metadata.creationTimestamp
+    subresources:
+      status: {}
+  scope: Namespaced
+{{- end }}
diff --git a/execution/helm/templates/theodolite/random-scheduler/cluster-role-binding.yaml b/execution/helm/templates/theodolite/random-scheduler/cluster-role-binding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..658f75c8c5018fe5b9f47cf9619bb4ee5b26b8e5
--- /dev/null
+++ b/execution/helm/templates/theodolite/random-scheduler/cluster-role-binding.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.randomScheduler.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ include "theodolite.fullname" . }}-random-scheduler
+subjects:
+- kind: ServiceAccount
+  name: {{ include "theodolite.fullname" . }}-random-scheduler
+  namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  apiGroup: rbac.authorization.k8s.io
+  name: system:kube-scheduler
+{{- end }}
diff --git a/execution/helm/templates/theodolite/random-scheduler/deployment.yaml b/execution/helm/templates/theodolite/random-scheduler/deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..35a6ad027b93446a2bb97e2ebd67f2e27652e99a
--- /dev/null
+++ b/execution/helm/templates/theodolite/random-scheduler/deployment.yaml
@@ -0,0 +1,30 @@
+{{- if .Values.randomScheduler.enabled -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "theodolite.fullname" . }}-random-scheduler
+  labels:
+    app: {{ include "theodolite.fullname" . }}
+    component: random-scheduler
+  namespace: kube-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: {{ include "theodolite.fullname" . }}
+      component: random-scheduler
+  template:
+    metadata:
+      labels:
+        app: {{ include "theodolite.fullname" . }}
+        component: random-scheduler
+    spec:
+      serviceAccount: {{ include "theodolite.fullname" . }}-random-scheduler
+      containers:
+        - name: random-scheduler
+          image: ghcr.io/cau-se/theodolite-random-scheduler:theodolite-kotlin-latest
+          #imagePullPolicy: Always
+          env:
+            - name: TARGET_NAMESPACE
+              value: {{ .Release.Namespace }}
+{{- end }}
diff --git a/execution/helm/templates/theodolite/random-scheduler/service-account.yaml b/execution/helm/templates/theodolite/random-scheduler/service-account.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..babfff17b46d62e7e820fcb9dc8a35d73b4e6538
--- /dev/null
+++ b/execution/helm/templates/theodolite/random-scheduler/service-account.yaml
@@ -0,0 +1,10 @@
+{{- if .Values.randomScheduler.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  namespace: kube-system
+  name: {{ include "theodolite.fullname" . }}-random-scheduler 
+  labels:
+    app: {{ include "theodolite.fullname" . }}
+    component: random-scheduler
+{{- end }}
diff --git a/execution/helm/templates/theodolite/role-binding.yaml b/execution/helm/templates/theodolite/role-binding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..93d8c34e7bc544c3b0c231e986bc58c792cce38e
--- /dev/null
+++ b/execution/helm/templates/theodolite/role-binding.yaml
@@ -0,0 +1,15 @@
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name:  {{ include "theodolite.fullname" . }}
+  labels:
+    app:  {{ include "theodolite.name" . }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ include "theodolite.fullname" . }}
+subjects:
+- kind: ServiceAccount
+  name: {{ include "theodolite.serviceAccountName" . }}
+{{- end }}
\ No newline at end of file
diff --git a/execution/helm/templates/theodolite/role.yaml b/execution/helm/templates/theodolite/role.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1f175890869031bc393c2b69583ff7e9c698fef2
--- /dev/null
+++ b/execution/helm/templates/theodolite/role.yaml
@@ -0,0 +1,71 @@
+{{- if .Values.rbac.create -}}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ include "theodolite.fullname" . }}
+rules:
+  - apiGroups:
+    - apps
+    resources:
+    - deployments
+    - statefulsets
+    verbs:
+    - delete
+    - list
+    - get
+    - create
+    - update
+  - apiGroups:
+    - ""
+    resources:
+    - services
+    - pods
+    - configmaps
+    verbs:
+    - update
+    - delete
+    - list
+    - get
+    - create
+  - apiGroups:
+    - ""
+    resources:
+    - pods/exec
+    verbs:
+    - create
+    - get
+  - apiGroups:
+    - monitoring.coreos.com
+    resources:
+    - servicemonitors
+    verbs:
+    - update
+    - delete
+    - list
+    - create
+  {{- if .Values.operator.enabled }}
+  - apiGroups:
+    - theodolite.com
+    resources:
+    - benchmarks
+    - executions
+    - executions/status
+    verbs:
+    - delete
+    - list
+    - get
+    - create
+    - watch
+    - update
+    - patch
+  - apiGroups:
+    - coordination.k8s.io
+    resources:
+    - leases
+    verbs:
+    - delete
+    - get
+    - create
+    - update
+  {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/execution/helm/templates/theodolite/serviceaccount.yaml b/execution/helm/templates/theodolite/serviceaccount.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4585b8ce413bf3d36cb986163788c353f2a4a2de
--- /dev/null
+++ b/execution/helm/templates/theodolite/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "theodolite.serviceAccountName" . }}
+  labels:
+    {{- include "theodolite.labels" . | nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/execution/helm/templates/theodolite/theodolite-operator.yaml b/execution/helm/templates/theodolite/theodolite-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7e9194fb47956e9dfa447a5bf7f820f34bbd50d4
--- /dev/null
+++ b/execution/helm/templates/theodolite/theodolite-operator.yaml
@@ -0,0 +1,74 @@
+{{- if .Values.operator.enabled -}}
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "theodolite.fullname" . }}-operator
+spec:
+  selector:
+    matchLabels:
+      app: {{ include "theodolite.fullname" . }}
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: {{ include "theodolite.fullname" . }}
+    spec:
+      terminationGracePeriodSeconds: 0
+      serviceAccountName:  {{ include "theodolite.serviceAccountName" . }}
+      securityContext:
+        runAsUser: 0 # Set the permissions for write access to the volumes.
+      containers:
+        - name: theodolite
+          image: "{{ .Values.operator.image }}:{{ .Values.operator.imageTag }}"
+          imagePullPolicy: "{{ .Values.operator.imagePullPolicy }}"
+          env:
+            - name: NAMESPACE
+              value: {{ .Release.Namespace }}
+            - name: MODE
+              value: operator
+            - name: THEODOLITE_APP_RESOURCES
+              value: "./benchmark-resources"
+            - name: RESULTS_FOLDER
+              value: "results"
+          volumeMounts:
+            {{- if .Values.operator.resultsVolume.enabled }}
+            - name: theodolite-pv-storage
+              mountPath: "/deployments/results"
+            {{- end }}
+            - name: benchmark-resources
+              mountPath: /work/benchmark-resources
+        {{- if .Values.operator.sloChecker.lagTrend.enabled }}
+        - name: lag-trend-slo-checker
+          image: "{{ .Values.operator.sloChecker.lagTrend.image }}:{{ .Values.operator.sloChecker.lagTrend.imageTag }}"
+          imagePullPolicy: "{{ .Values.operator.sloChecker.lagTrend.imagePullPolicy }}"
+          ports:
+          - containerPort: 80
+            name: analysis
+          env:
+          - name: LOG_LEVEL
+            value: INFO
+        {{- end }}
+        {{- if and .Values.operator.resultsVolume.enabled .Values.operator.resultsVolume.accessSidecar.enabled }}
+        - name: results-access
+          image: "{{ .Values.operator.resultsVolume.accessSidecar.image }}:{{ .Values.operator.resultsVolume.accessSidecar.imageTag }}"
+          imagePullPolicy: "{{ .Values.operator.resultsVolume.accessSidecar.imagePullPolicy }}"
+          command:
+          - sh
+          - -c
+          - exec tail -f /dev/null
+          volumeMounts:
+          - mountPath: /results
+            name: theodolite-pv-storage
+        {{- end }}
+      volumes:
+      {{- if .Values.operator.resultsVolume.enabled }}
+      - name: theodolite-pv-storage
+        persistentVolumeClaim:
+          claimName: {{ .Values.operator.resultsVolume.persistentVolumeClaim.name | quote }}
+      {{- end }}
+      - name: benchmark-resources
+        configMap:
+          name: benchmark-resources
+          optional: true
+{{- end }}
diff --git a/execution/helm/values.yaml b/execution/helm/values.yaml
index e84af5efcdc49b6caec44365e61ad8fccfac7813..4b970bf868092bca5571cb1ea0ba2360945c8ebf 100644
--- a/execution/helm/values.yaml
+++ b/execution/helm/values.yaml
@@ -25,8 +25,16 @@ grafana:
   adminUser: admin
   adminPassword: admin
   grafana.ini:
+    #org_name: Theodolite
+    auth.anonymous:
+      # enable anonymous access
+      enabled: true
+      org_role: Admin # Role for unauthenticated users; other valid values are `Viewer` and `Editor`
     users:
       default_theme: light
+    #dashboards: # the following doesn't work yet but is planned
+      # Path to the default home dashboard. If this value is empty, then Grafana uses StaticRootPath + "dashboards/home.json"
+      #default_home_dashboard_path: "/tmp/dashboards/k8s-dashboard.json"
   ## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders
   ## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
   sidecar:
@@ -92,8 +100,9 @@ cp-helm-charts:
       "replica.fetch.max.bytes": "134217728" # 128 MB
       #default.replication.factor: 1
       # "min.insync.replicas": 2
-      # "auto.create.topics.enable": false
-      "log.retention.ms": "10000" # 10s
+      "auto.create.topics.enable": false
+      #"log.retention.ms": "10000" # 10s
+      "log.retention.ms": "7200000" # 2h
       "metrics.sample.window.ms": "5000" #5s
       "advertised.listeners": |-
         EXTERNAL://${HOST_IP}:$((31090 + ${KAFKA_BROKER_ID}))
@@ -148,8 +157,8 @@ kafka-lag-exporter:
   enabled: true
   nodeSelector: {}
   clusters:
-    - name: "my-confluent-cp-kafka"
-      bootstrapBrokers: "my-confluent-cp-kafka:9092"
+    - name: "theodolite-cp-kafka"
+      bootstrapBrokers: "theodolite-cp-kafka:9092"
 
   ## The interval between refreshing metrics
   pollIntervalSeconds: 15
@@ -229,4 +238,50 @@ prometheus:
   clusterRole:
     enabled: true
   clusterRoleBinding:
-    enabled: true
\ No newline at end of file
+    enabled: true
+
+###
+# Theodolite Operator
+###
+operator:
+  enabled: true
+  
+  image: ghcr.io/cau-se/theodolite
+  imageTag: theodolite-kotlin-latest
+  imagePullPolicy: Always
+
+  executionCRD:
+    create: true
+  benchmarkCRD:
+    create: true
+
+  sloChecker:
+    lagTrend:
+      enabled: true
+      image: ghcr.io/cau-se/theodolite-slo-checker-lag-trend
+      imageTag: theodolite-kotlin-latest
+      imagePullPolicy: Always
+
+  resultsVolume:
+    enabled: true
+    persistentVolumeClaim:
+      name: theodolite-pv-claim
+    accessSidecar:
+      enabled: true
+      image: busybox
+      imageTag: stable
+      imagePullPolicy: IfNotPresent
+
+
+serviceAccount:
+  create: true
+
+rbac:
+  create: true
+
+randomScheduler:
+  enabled: true
+  rbac:
+    create: true
+  serviceAccount:
+    create: true
diff --git a/execution/infrastructure/kafka/values.yaml b/execution/infrastructure/kafka/values.yaml
index 9c708ca054bc017874522cebb4ad2157bdce85a7..15fd8a822a18521f247584d1becbd09c19c137d2 100644
--- a/execution/infrastructure/kafka/values.yaml
+++ b/execution/infrastructure/kafka/values.yaml
@@ -48,7 +48,7 @@ cp-kafka:
   #   cpu: 100m
   #   memory: 128Mi
   configurationOverrides:
-    #"offsets.topic.replication.factor": "3"
+    # offsets.topic.replication.factor: "3"
     "message.max.bytes": "134217728" # 128 MB
     "replica.fetch.max.bytes": "134217728" # 128 MB
     # "default.replication.factor": 3
diff --git a/execution/infrastructure/kafka/values_kafka_nodeport.yaml b/execution/infrastructure/kafka/values_kafka_nodeport.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cf1deb3a0eda97039ad4609a1f07fa54d4d5d1ea
--- /dev/null
+++ b/execution/infrastructure/kafka/values_kafka_nodeport.yaml
@@ -0,0 +1,97 @@
+## ------------------------------------------------------
+## Zookeeper
+## ------------------------------------------------------
+cp-zookeeper:
+  enabled: true
+  servers: 1
+  image: confluentinc/cp-zookeeper
+  imageTag: 5.4.0
+  ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+  ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  imagePullSecrets:
+  #  - name: "regcred"
+  heapOptions: "-Xms512M -Xmx512M"
+  persistence:
+    enabled: false
+  resources: {}
+  ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
+  ## and remove the curly braces after 'resources:'
+  #  limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  #  requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+## ------------------------------------------------------
+## Kafka
+## ------------------------------------------------------
+cp-kafka:
+  enabled: true
+  brokers: 1
+  image: confluentinc/cp-enterprise-kafka
+  imageTag: 5.4.0
+  ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+  ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  imagePullSecrets:
+  #  - name: "regcred"
+  heapOptions: "-Xms512M -Xmx512M"
+  persistence:
+    enabled: false
+  resources: {}
+  ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
+  ## and remove the curly braces after 'resources:'
+  #  limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  #  requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+  configurationOverrides:
+    offsets.topic.replication.factor: "1"
+    "message.max.bytes": "134217728" # 128 MB
+    "replica.fetch.max.bytes": "134217728" # 128 MB
+    # "default.replication.factor": 3
+    # "min.insync.replicas": 2
+    "auto.create.topics.enable": false
+    "log.retention.ms": "10000" # 10s
+    #"log.retention.ms": "86400000" # 24h
+    "metrics.sample.window.ms": "5000" #5s
+  
+  # access kafka from outside
+  nodeport:
+    enabled: true
+
+## ------------------------------------------------------
+## Schema Registry
+## ------------------------------------------------------
+cp-schema-registry:
+  enabled: true
+  image: confluentinc/cp-schema-registry
+  imageTag: 5.4.0
+  ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
+  ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
+  imagePullSecrets:
+  #  - name: "regcred"
+  heapOptions: "-Xms512M -Xmx512M"
+  resources: {}
+  ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
+  ## and remove the curly braces after 'resources:'
+  #  limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  #  requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+cp-kafka-rest:
+  enabled: false
+
+cp-kafka-connect:
+  enabled: false
+
+cp-ksql-server:
+  enabled: false
+
+cp-control-center:
+  enabled: false
diff --git a/execution/infrastructure/kubernetes/rbac/role.yaml b/execution/infrastructure/kubernetes/rbac/role.yaml
index 84ba14a8bc7a6eceb8a20596ede057ca2271b967..e45814eedacd30715075f66e520f9f9e6bfc42ad 100644
--- a/execution/infrastructure/kubernetes/rbac/role.yaml
+++ b/execution/infrastructure/kubernetes/rbac/role.yaml
@@ -38,4 +38,26 @@ rules:
     verbs:
     - delete
     - list
-    - create
\ No newline at end of file
+    - create
+  - apiGroups:
+    - theodolite.com
+    resources: 
+    - executions
+    - benchmarks
+    verbs:
+    - delete
+    - list
+    - get
+    - create
+    - watch
+    - update
+    - patch
+  - apiGroups:
+    - coordination.k8s.io
+    resources:
+    - leases
+    verbs:
+    - delete
+    - get
+    - create
+    - update
\ No newline at end of file
diff --git a/execution/infrastructure/random-scheduler/schedule.sh b/execution/infrastructure/random-scheduler/schedule.sh
index e2e10c0abbdd06da5f5075cd21851331ffb593fe..06745354d061225cfc1b3a746d361036b647051b 100755
--- a/execution/infrastructure/random-scheduler/schedule.sh
+++ b/execution/infrastructure/random-scheduler/schedule.sh
@@ -8,11 +8,18 @@ while true;
 do
     for PODNAME in $(kubectl get pods -n $TARGET_NAMESPACE -o json | jq '.items[] | select(.spec.schedulerName == "random-scheduler") | select(.spec.nodeName == null) | .metadata.name' | tr -d '"');
     do
-        NODES=($(kubectl get nodes -o json | jq '.items[].metadata.name' | tr -d '"'))
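+        # Determine the pod's nodeSelector (empty object if none) and only consider nodes whose labels match it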
+        NODE_SELECTOR=$(kubectl get pod $PODNAME -n $TARGET_NAMESPACE -o json | jq -S 'if .spec.nodeSelector != null then .spec.nodeSelector else {} end')
+        NODES=($(kubectl get nodes -o json | jq --argjson nodeSelector "$NODE_SELECTOR" '.items[] | select(.metadata.labels | contains($nodeSelector)) | .metadata.name' | tr -d '"'))
         NUMNODES=${#NODES[@]}
+        if [ $NUMNODES -eq 0 ]; then
+            echo "No nodes found matching the node selector: $NODE_SELECTOR from pod $PODNAME"
+            echo "Pod $PODNAME cannot be scheduled."
+            continue;
+        fi
+        echo "Found $NUM_NODES suitable nodes for pod $PODNAME"
         CHOSEN=${NODES[$[$RANDOM % $NUMNODES]]}
         curl --header "Content-Type:application/json" --request POST --data '{"apiVersion":"v1", "kind": "Binding", "metadata": {"name": "'$PODNAME'"}, "target": {"apiVersion": "v1", "kind": "Node", "name": "'$CHOSEN'"}}' localhost:8080/api/v1/namespaces/$TARGET_NAMESPACE/pods/$PODNAME/binding/
         echo "Assigned $PODNAME to $CHOSEN"
     done
     sleep 1
-done
\ No newline at end of file
+done
diff --git a/execution/lag_analysis.py b/execution/lag_analysis.py
deleted file mode 100644
index 5b78ef3653753a2b95ac9b74bf8de156a71fb14c..0000000000000000000000000000000000000000
--- a/execution/lag_analysis.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import sys
-import os
-import requests
-from datetime import datetime, timedelta, timezone
-import pandas as pd
-import matplotlib.pyplot as plt
-import csv
-import logging
-
-
-def main(exp_id, benchmark, dim_value, instances, execution_minutes, prometheus_base_url, result_path):
-    print("Main")
-    time_diff_ms = int(os.getenv('CLOCK_DIFF_MS', 0))
-
-    now_local = datetime.utcnow().replace(tzinfo=timezone.utc).replace(microsecond=0)
-    now = now_local - timedelta(milliseconds=time_diff_ms)
-    print(f"Now Local: {now_local}")
-    print(f"Now Used: {now}")
-
-    end = now
-    start = now - timedelta(minutes=execution_minutes)
-
-    #print(start.isoformat().replace('+00:00', 'Z'))
-    #print(end.isoformat().replace('+00:00', 'Z'))
-
-    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
-        # 'query': "sum by(job,topic)(kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
-        'query': "sum by(group, topic)(kafka_consumergroup_group_lag > 0)",
-        'start': start.isoformat(),
-        'end': end.isoformat(),
-        'step': '5s'})
-    # response
-    # print(response.request.path_url)
-    # response.content
-    results = response.json()['data']['result']
-
-    d = []
-
-    for result in results:
-        # print(result['metric']['topic'])
-        topic = result['metric']['topic']
-        for value in result['values']:
-            # print(value)
-            d.append({'topic': topic, 'timestamp': int(
-                value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
-
-    df = pd.DataFrame(d)
-
-    # Do some analysis
-
-    input = df.loc[df['topic'] == "input"]
-
-    # input.plot(kind='line',x='timestamp',y='value',color='red')
-    # plt.show()
-
-    from sklearn.linear_model import LinearRegression
-
-    # values converts it into a numpy array
-    X = input.iloc[:, 1].values.reshape(-1, 1)
-    # -1 means that calculate the dimension of rows, but have 1 column
-    Y = input.iloc[:, 2].values.reshape(-1, 1)
-    linear_regressor = LinearRegression()  # create object for the class
-    linear_regressor.fit(X, Y)  # perform linear regression
-    Y_pred = linear_regressor.predict(X)  # make predictions
-
-    print(linear_regressor.coef_)
-
-    # print(Y_pred)
-
-    fields = [exp_id, datetime.now(), benchmark, dim_value,
-              instances, linear_regressor.coef_]
-    print(fields)
-    with open(f'{result_path}/results.csv', 'a') as f:
-        writer = csv.writer(f)
-        writer.writerow(fields)
-
-    filename = f"{result_path}/exp{exp_id}_{benchmark}_{dim_value}_{instances}"
-
-    plt.plot(X, Y)
-    plt.plot(X, Y_pred, color='red')
-
-    plt.savefig(f"{filename}_plot.png")
-
-    df.to_csv(f"{filename}_values.csv")
-
-    # Load total lag count
-
-    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
-        'query': "sum by(group)(kafka_consumergroup_group_lag > 0)",
-        'start': start.isoformat(),
-        'end': end.isoformat(),
-        'step': '5s'})
-
-    results = response.json()['data']['result']
-
-    d = []
-
-    for result in results:
-        # print(result['metric']['topic'])
-        group = result['metric']['group']
-        for value in result['values']:
-            # print(value)
-            d.append({'group': group, 'timestamp': int(
-                value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
-
-    df = pd.DataFrame(d)
-
-    df.to_csv(f"{filename}_totallag.csv")
-
-    # Load partition count
-
-    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
-        'query': "count by(group,topic)(kafka_consumergroup_group_offset > 0)",
-        'start': start.isoformat(),
-        'end': end.isoformat(),
-        'step': '5s'})
-
-    results = response.json()['data']['result']
-
-    d = []
-
-    for result in results:
-        # print(result['metric']['topic'])
-        topic = result['metric']['topic']
-        for value in result['values']:
-            # print(value)
-            d.append({'topic': topic, 'timestamp': int(
-                value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
-
-    df = pd.DataFrame(d)
-
-    df.to_csv(f"{filename}_partitions.csv")
-
-    # Load instances count
-
-    response = requests.get(prometheus_base_url + '/api/v1/query_range', params={
-        'query': "count(count (kafka_consumer_consumer_fetch_manager_metrics_records_lag) by(pod))",
-        'start': start.isoformat(),
-        'end': end.isoformat(),
-        'step': '5s'})
-
-    results = response.json()['data']['result']
-
-    d = []
-
-    for result in results:
-        for value in result['values']:
-            # print(value)
-            d.append({'timestamp': int(value[0]), 'value': int(value[1])})
-
-    df = pd.DataFrame(d)
-
-    df.to_csv(f"{filename}_instances.csv")
-
-
-if __name__ == '__main__':
-    logging.basicConfig(level=logging.INFO)
-
-    # Load arguments
-    exp_id = sys.argv[1]
-    benchmark = sys.argv[2]
-    dim_value = sys.argv[3]
-    instances = sys.argv[4]
-    execution_minutes = int(sys.argv[5])
-
-    main(exp_id, benchmark, dim_value, instances, execution_minutes,
-        'http://localhost:9090', 'results')
diff --git a/execution/lib/__init__.py b/execution/lib/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/execution/lib/cli_parser.py b/execution/lib/cli_parser.py
deleted file mode 100644
index de609bc55e21e9467a2b28168be6e478171cfddd..0000000000000000000000000000000000000000
--- a/execution/lib/cli_parser.py
+++ /dev/null
@@ -1,167 +0,0 @@
-import argparse
-import os
-
-
-def env_list_default(env, tf):
-    """
-    Makes a list from an environment string.
-    """
-    v = os.environ.get(env)
-    if v is not None:
-        v = [tf(s) for s in v.split(',')]
-    return v
-
-
-def key_values_to_dict(kvs):
-    """
-    Given a list with key values in form `Key=Value` it creates a dict from it.
-    """
-    my_dict = {}
-    for kv in kvs:
-        k, v = kv.split("=")
-        my_dict[k] = v
-    return my_dict
-
-
-def env_dict_default(env):
-    """
-    Makes a dict from an environment string.
-    """
-    v = os.environ.get(env)
-    if v is not None:
-        return key_values_to_dict(v.split(','))
-    else:
-        return dict()
-
-
-class StoreDictKeyPair(argparse.Action):
-    def __init__(self, option_strings, dest, nargs=None, **kwargs):
-        self._nargs = nargs
-        super(StoreDictKeyPair, self).__init__(
-            option_strings, dest, nargs=nargs, **kwargs)
-
-    def __call__(self, parser, namespace, values, option_string=None):
-        my_dict = key_values_to_dict(values)
-        setattr(namespace, self.dest, my_dict)
-
-
-def default_parser(description):
-    """
-    Returns the default parser that can be used for thodolite and run uc py
-    :param description: The description the argument parser should show.
-    """
-    parser = argparse.ArgumentParser(description=description)
-    parser.add_argument('--uc',
-                        metavar='<uc>',
-                        default=os.environ.get('UC'),
-                        help='[mandatory] use case number, one of 1, 2, 3 or 4')
-    parser.add_argument('--partitions', '-p',
-                        metavar='<partitions>',
-                        type=int,
-                        default=os.environ.get('PARTITIONS', 40),
-                        help='Number of partitions for Kafka topics')
-    parser.add_argument('--cpu-limit', '-cpu',
-                        metavar='<CPU limit>',
-                        default=os.environ.get('CPU_LIMIT', '1000m'),
-                        help='Kubernetes CPU limit')
-    parser.add_argument('--memory-limit', '-mem',
-                        metavar='<memory limit>',
-                        default=os.environ.get('MEMORY_LIMIT', '4Gi'),
-                        help='Kubernetes memory limit')
-    parser.add_argument('--duration', '-d',
-                        metavar='<duration>',
-                        type=int,
-                        default=os.environ.get('DURATION', 5),
-                        help='Duration in minutes subexperiments should be \
-                                executed for')
-    parser.add_argument('--namespace',
-                        metavar='<NS>',
-                        default=os.environ.get('NAMESPACE', 'default'),
-                        help='Defines the Kubernetes where the applications should run')
-    parser.add_argument('--reset',
-                        action="store_true",
-                        default=os.environ.get(
-                            'RESET', 'false').lower() == 'true',
-                        help='Resets the environment before execution')
-    parser.add_argument('--reset-only',
-                        action="store_true",
-                        default=os.environ.get(
-                            'RESET_ONLY', 'false').lower() == 'true',
-                        help='Only resets the environment. Ignores all other parameters')
-    parser.add_argument('--prometheus',
-                        metavar='<URL>',
-                        default=os.environ.get(
-                            'PROMETHEUS_BASE_URL', 'http://localhost:9090'),
-                        help='Defines where to find the prometheus instance')
-    parser.add_argument('--path',
-                        metavar='<path>',
-                        default=os.environ.get('RESULT_PATH', 'results'),
-                        help='A directory path for the results')
-    parser.add_argument("--configurations",
-                        metavar="KEY=VAL",
-                        dest="configurations",
-                        action=StoreDictKeyPair,
-                        nargs="+",
-                        default=env_dict_default('CONFIGURATIONS'),
-                        help='Defines the environment variables for the UC')
-    return parser
-
-
-def benchmark_parser(description):
-    """
-    Parser for the overall benchmark execution
-    :param description: The description the argument parser should show.
-    """
-    parser = default_parser(description)
-
-    parser.add_argument('--loads',
-                        metavar='<load>',
-                        type=int,
-                        nargs='+',
-                        default=env_list_default('LOADS', int),
-                        help='[mandatory] Loads that should be executed')
-    parser.add_argument('--instances', '-i',
-                        dest='instances_list',
-                        metavar='<instances>',
-                        type=int,
-                        nargs='+',
-                        default=env_list_default('INSTANCES', int),
-                        help='[mandatory] List of instances used in benchmarks')
-    parser.add_argument('--domain-restriction',
-                        action="store_true",
-                        default=os.environ.get(
-                            'DOMAIN_RESTRICTION', 'false').lower() == 'true',
-                        help='To use domain restriction. For details see README')
-    parser.add_argument('--search-strategy',
-                        metavar='<strategy>',
-                        default=os.environ.get('SEARCH_STRATEGY', 'default'),
-                        help='The benchmarking search strategy. Can be set to default, linear-search or binary-search')
-    parser.add_argument('--threshold',
-                        type=int,
-                        metavar='<threshold>',
-                        default=os.environ.get('THRESHOLD', 2000),
-                        help='The threshold for the trend slop that the search strategies use to determine that a load could be handled')
-    return parser
-
-
-def execution_parser(description):
-    """
-    Parser for executing one use case
-    :param description: The description the argument parser should show.
-    """
-    parser = default_parser(description)
-    parser.add_argument('--exp-id',
-                        metavar='<exp id>',
-                        default=os.environ.get('EXP_ID'),
-                        help='[mandatory] ID of the experiment')
-    parser.add_argument('--load',
-                        metavar='<load>',
-                        type=int,
-                        default=os.environ.get('LOAD'),
-                        help='[mandatory] Load that should be used for benchmakr')
-    parser.add_argument('--instances',
-                        metavar='<instances>',
-                        type=int,
-                        default=os.environ.get('INSTANCES'),
-                        help='[mandatory] Numbers of instances to be benchmarked')
-    return parser
diff --git a/execution/requirements.txt b/execution/requirements.txt
deleted file mode 100644
index 18a06882007eebf69bf3bf4f84b869454b36a0a6..0000000000000000000000000000000000000000
--- a/execution/requirements.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-matplotlib==3.2.0
-pandas==1.0.1
-requests==2.23.0
-scikit-learn==0.22.2.post1
-
-# For run_uc.py
-kubernetes==11.0.0
-confuse==1.1.0
diff --git a/execution/run_uc.py b/execution/run_uc.py
deleted file mode 100644
index 904b87b377ca2db3f2d4ddd4fb70aba0136cfa21..0000000000000000000000000000000000000000
--- a/execution/run_uc.py
+++ /dev/null
@@ -1,609 +0,0 @@
-import argparse  # parse arguments from cli
-import atexit  # used to clear resources at exit of program (e.g. ctrl-c)
-from kubernetes import client, config  # kubernetes api
-from kubernetes.stream import stream
-import lag_analysis
-import logging  # logging
-from os import path, environ  # path utilities
-from lib.cli_parser import execution_parser
-import subprocess  # execute bash commands
-import sys  # for exit of program
-import time  # process sleep
-import yaml  # convert from file to yaml object
-
-coreApi = None  # acces kubernetes core api
-appsApi = None  # acces kubernetes apps api
-customApi = None  # acces kubernetes custom object api
-
-namespace = None
-
-
-def load_variables():
-    """Load the CLI variables given at the command line"""
-    print('Load CLI variables')
-    parser = execution_parser(description='Run use case Programm')
-    args = parser.parse_args()
-    print(args)
-    if (args.exp_id is None or args.uc is None or args.load is None or args.instances is None) and not args.reset_only:
-        print('The options --exp-id, --uc, --load and --instances are mandatory.')
-        print('Some might not be set!')
-        sys.exit(1)
-    return args
-
-
-def initialize_kubernetes_api():
-    """Load the kubernetes config from local or the cluster and creates
-    needed APIs.
-    """
-    global coreApi, appsApi, customApi
-    print('Connect to kubernetes api')
-    try:
-        config.load_kube_config()  # try using local config
-    except config.config_exception.ConfigException as e:
-        # load config from pod, if local config is not available
-        logging.debug(
-            'Failed loading local Kubernetes configuration, trying in-cluster configuration')
-        logging.debug(e)
-        config.load_incluster_config()
-
-    coreApi = client.CoreV1Api()
-    appsApi = client.AppsV1Api()
-    customApi = client.CustomObjectsApi()
-
-
-def create_topics(topics):
-    """Create the topics needed for the use cases
-    :param topics: List of topics that should be created.
-    """
-    # Calling exec and waiting for response
-    print('Create topics')
-    for (topic, partitions) in topics:
-        print(f'Create topic {topic} with #{partitions} partitions')
-        exec_command = [
-            '/bin/sh',
-            '-c',
-            f'kafka-topics --zookeeper my-confluent-cp-zookeeper:2181\
-            --create --topic {topic} --partitions {partitions}\
-            --replication-factor 1'
-        ]
-        resp = stream(coreApi.connect_get_namespaced_pod_exec,
-                      "kafka-client",
-                      namespace,
-                      command=exec_command,
-                      stderr=True, stdin=False,
-                      stdout=True, tty=False)
-        print(resp)
-
-
-def load_yaml(file_path):
-    """Creates a yaml file from the file at given path.
-    :param file_path: The path to the file which contains the yaml.
-    :return: The file as a yaml object.
-    """
-    try:
-        f = open(path.join(path.dirname(__file__), file_path))
-        with f:
-            return yaml.safe_load(f)
-    except Exception as e:
-        logging.error('Error opening file %s', file_path)
-        logging.error(e)
-
-
-def load_yaml_files():
-    """Load the needed yaml files and creates objects from them.
-    :return: wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy
-    """
-    print('Load kubernetes yaml files')
-    wg_svc = load_yaml('uc-workload-generator/load-generator-service.yaml')
-    wg = load_yaml('uc-workload-generator/workloadGenerator.yaml')
-    app_svc = load_yaml('uc-application/aggregation-service.yaml')
-    app_svc_monitor = load_yaml('uc-application/service-monitor.yaml')
-    app_jmx = load_yaml('uc-application/jmx-configmap.yaml')
-    app_deploy = load_yaml('uc-application/aggregation-deployment.yaml')
-
-    print('Kubernetes yaml files loaded')
-    return wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy
-
-
-def replace_env_value(container, key, value):
-    """
-    Helper method to replace the value of a given environment variable
-    in a container's Kubernetes env list.
-    """
-    next(filter(lambda x: x['name'] == key, container))[
-        'value'] = value
-
-
-def start_workload_generator(svc_yaml, wg_yaml, dim_value, uc_id):
-    """Starts the workload generator.
-    :param svc_yaml: The yaml object for the workload generator service.
-    :param wg_yaml: The yaml object for the workload generator.
-    :param string dim_value: The dimension value the load generator should use.
-    :param string uc_id: Use case id for which load should be generated.
-    :return:
-        The Service and Deployment created by the API, or the corresponding
-        yaml objects in case they already exist or an error occurs.
-    """
-    print('Start workload generator')
-    svc, wg_deploy = None, None
-
-    # Create Service
-    try:
-        svc = coreApi.create_namespaced_service(
-            namespace=namespace, body=svc_yaml)
-        print(f'Service {svc.metadata.name} created.')
-    except client.rest.ApiException as e:
-        svc = svc_yaml
-        logging.error("Service creation error: %s", e.reason)
-
-    # Create Deployment
-    num_sensors = dim_value
-    wl_max_records = 150000
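-    # Ceiling division: each load generator instance handles at most wl_max_records sensors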
-    wl_instances = (num_sensors + wl_max_records - 1) // wl_max_records
-
-    # Set parameters specific to UC4
-    if uc_id == '4':
-        print('Apply special UC4 configuration')
-        num_nested_groups = dim_value
-        num_sensors = 4
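-        # UC4 uses a hierarchy of nested sensor groups, so the approximate total
-        # number of sensors grows exponentially with the nesting depth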
-        approx_num_sensors = num_sensors ** num_nested_groups
-        wl_instances = (approx_num_sensors +
-                        wl_max_records - 1) // wl_max_records
-
-    # Customize workload generator creation
-    wg_yaml['spec']['replicas'] = wl_instances
-    # Set used use case
-    wg_container = next(filter(
-        lambda x: x['name'] == 'workload-generator', wg_yaml['spec']['template']['spec']['containers']))
-    wg_container['image'] = 'ghcr.io/cau-se/theodolite-uc' + uc_id + \
-        '-workload-generator:latest'
-    # Set environment variables
-
-    replace_env_value(wg_container['env'], 'NUM_SENSORS', str(num_sensors))
-
-    if uc_id == '4':  # Special configuration for UC4
-        replace_env_value(
-            wg_container['env'], 'NUM_NESTED_GROUPS', str(num_nested_groups))
-
-    try:
-        wg_deploy = appsApi.create_namespaced_deployment(
-            namespace=namespace,
-            body=wg_yaml
-        )
-        print(f'Deployment {wg_deploy.metadata.name} created.')
-    except client.rest.ApiException as e:
-        print(f'Deployment creation error: {e.reason}')
-        wg_deploy = wg_yaml
-
-    return svc, wg_deploy
-
-
-def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml,
-                      instances, uc_id, memory_limit, cpu_limit,
-                      configurations):
-    """Applies the service, service monitor, jmx config map and start the
-    use case application.
-
-    :param svc_yaml: The yaml object for the service.
-    :param svc_monitor_yaml: The yaml object for the service monitor.
-    :param jmx_yaml: The yaml object for the jmx config map.
-    :param deploy_yaml: The yaml object for the application.
-    :param int instances: Number of instances for use case application.
-    :param string uc_id: The id of the use case to execute.
-    :param string memory_limit: The memory limit for the application.
-    :param string cpu_limit: The CPU limit for the application.
-    :param dict configurations: A dictionary with ENV variables for configurations.
-    :return:
-        The Service, ServiceMonitor, JMX ConfigMap and Deployment.
-        In case a resource already exists or an error occurs, the
-        corresponding yaml object is returned instead.
-        Returned as (svc, svc_monitor, jmx_cm, app_deploy).
-    """
-    print('Start use case application')
-    svc, svc_monitor, jmx_cm, app_deploy = None, None, None, None
-
-    # Create Service
-    try:
-        svc = coreApi.create_namespaced_service(
-            namespace=namespace, body=svc_yaml)
-        print(f'Service {svc.metadata.name} created.')
-    except client.rest.ApiException as e:
-        svc = svc_yaml
-        logging.error("Service creation error: %s", e.reason)
-
-    # Create custom object service monitor
-    try:
-        svc_monitor = customApi.create_namespaced_custom_object(
-            group="monitoring.coreos.com",
-            version="v1",
-            namespace=namespace,
-            plural="servicemonitors",  # CustomResourceDef of ServiceMonitor
-            body=svc_monitor_yaml,
-        )
-        print(f"ServiceMonitor '{svc_monitor['metadata']['name']}' created.")
-    except client.rest.ApiException as e:
-        svc_monitor = svc_monitor_yaml
-        logging.error("ServiceMonitor creation error: %s", e.reason)
-
-    # Apply jmx config map for aggregation service
-    try:
-        jmx_cm = coreApi.create_namespaced_config_map(
-            namespace=namespace, body=jmx_yaml)
-        print(f"ConfigMap '{jmx_cm.metadata.name}' created.")
-    except client.rest.ApiException as e:
-        jmx_cm = jmx_yaml
-        logging.error("ConfigMap creation error: %s", e.reason)
-
-    # Create deployment
-    deploy_yaml['spec']['replicas'] = instances
-    app_container = next(filter(
-        lambda x: x['name'] == 'uc-application',
-        deploy_yaml['spec']['template']['spec']['containers']))
-    app_container['image'] = 'ghcr.io/cau-se/theodolite-uc' + uc_id \
-        + '-kstreams-app:latest'
-
-    # Set configurations environment parameters for SPE
-    for k, v in configurations.items():
-        # check if environment variable is already defined in yaml
-        env = next(filter(lambda x: x['name'] == k,
-                          app_container['env']), None)
-        if env is not None:
-            env['value'] = v  # replace value
-        else:
-            # create new environment pair
-            conf = {'name': k, 'value': v}
-            app_container['env'].append(conf)
-
-    # Set resources in Kubernetes
-    app_container['resources']['limits']['memory'] = memory_limit
-    app_container['resources']['limits']['cpu'] = cpu_limit
-
-    # Deploy application
-    try:
-        app_deploy = appsApi.create_namespaced_deployment(
-            namespace=namespace,
-            body=deploy_yaml
-        )
-        print(f"Deployment '{app_deploy.metadata.name}' created.")
-    except client.rest.ApiException as e:
-        app_deploy = deploy_yaml
-        logging.error("Deployment creation error: %s", e.reason)
-
-    return svc, svc_monitor, jmx_cm, app_deploy
-
-
-def wait_execution(execution_minutes):
-    """
-    Wait time while in execution.
-    :param int execution_minutes: The duration to wait for execution.
-    """
-    print('Wait while executing')
-
-    for i in range(execution_minutes):
-        time.sleep(60)
-        print(f'Executed: {i+1} minutes')
-    print('Execution finished')
-    return
-
-
-def run_evaluation(exp_id, uc_id, dim_value, instances, execution_minutes, prometheus_base_url, result_path):
-    """
-    Runs the evaluation function
-    :param string exp_id: ID of the experiment.
-    :param string uc_id: ID of the executed use case.
-    :param int dim_value: The dimension value used for execution.
-    :param int instances: The number of instances used for the execution.
-    :param int execution_minutes: How long the use case was executed (in minutes).
-    :param string prometheus_base_url: The base URL of the Prometheus instance to query.
-    :param string result_path: The path to store the results in.
-    """
-    print('Run evaluation function')
-    try:
-        lag_analysis.main(exp_id, f'uc{uc_id}', dim_value, instances,
-                          execution_minutes, prometheus_base_url,
-                          result_path)
-    except Exception as e:
-        err_msg = 'Evaluation function failed'
-        print(err_msg)
-        logging.exception(err_msg)
-        print('Benchmark execution continues')
-
-    return
-
-
-def delete_resource(obj, del_func):
-    """
-    Helper function to delete Kubernetes resources.
-    First tries to delete with the Kubernetes API object.
-    Then it uses the dict representation of the yaml to delete the object.
-    :param obj: Either a Kubernetes resource object or its yaml as a dict.
-    :param del_func: The function that needs to be executed for the deletion.
-    """
-    try:
-        del_func(obj.metadata.name, namespace)
-    except Exception as e:
-        logging.debug(
-            'Error deleting resource with api object, try with dict.')
-        try:
-            del_func(obj['metadata']['name'], namespace)
-        except Exception as e:
-            logging.error("Error deleting resource")
-            logging.error(e)
-            return
-    print('Resource deleted')
-
-
-def stop_applications(wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy):
-    """Stops the applied applications and delete resources.
-    :param wg: The load generator service.
-    :param wg: The load generator deployment.
-    :param app_svc: The application service.
-    :param app_svc_monitor: The application service monitor.
-    :param app_jmx: The application jmx config map.
-    :param app_deploy: The application deployment.
-    """
-    print('Stop use case application and load generator')
-
-    print('Delete load generator deployment')
-    delete_resource(wg, appsApi.delete_namespaced_deployment)
-
-    print('Delete load generator service')
-    delete_resource(wg_svc, coreApi.delete_namespaced_service)
-
-    print('Delete app service')
-    delete_resource(app_svc, coreApi.delete_namespaced_service)
-
-    print('Delete service monitor')
-    try:
-        customApi.delete_namespaced_custom_object(
-            group="monitoring.coreos.com",
-            version="v1",
-            namespace=namespace,
-            plural="servicemonitors",
-            name=app_svc_monitor['metadata']['name'])
-        print('Resource deleted')
-    except Exception as e:
-        print('Error deleting service monitor')
-
-    print('Delete jmx config map')
-    delete_resource(app_jmx, coreApi.delete_namespaced_config_map)
-
-    print('Delete uc application')
-    delete_resource(app_deploy, appsApi.delete_namespaced_deployment)
-
-    print('Check all pods deleted.')
-    while True:
-        # Wait a bit for the deletion
-        time.sleep(2)
-
-        # Count how many pods still need to be deleted
-        no_load = len(coreApi.list_namespaced_pod(
-            namespace, label_selector='app=titan-ccp-load-generator').items)
-        no_uc = len(coreApi.list_namespaced_pod(
-            namespace, label_selector='app=titan-ccp-aggregation').items)
-
-        # Check if all pods deleted
-        if no_load <= 0 and no_uc <= 0:
-            print('All pods deleted.')
-            break
-
-        print(f'#{no_load} load generator and #{no_uc} uc pods still need to be deleted')
-    return
-
-
-def delete_topics(topics):
-    """Delete topics from Kafka.
-    :param topics: List of topics to delete.
-    """
-    print('Delete topics from Kafka')
-
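-    # Besides the explicitly created topics, also match all topics prefixed with
-    # 'theodolite-' (typically the internal topics of the stream processing application)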
-    topics_delete = 'theodolite-.*|' + '|'.join([ti[0] for ti in topics])
-
-    num_topics_command = [
-        '/bin/sh',
-        '-c',
-        f'kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list \
-        | sed -n -E "/^({topics_delete})\
-        ( - marked for deletion)?$/p" | wc -l'
-    ]
-
-    topics_deletion_command = [
-        '/bin/sh',
-        '-c',
-        f'kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete \
-        --topic "{topics_delete}"'
-    ]
-
-    # Wait until the topics are deleted
-    while True:
-        # topic deletion, sometimes a second deletion seems to be required
-        resp = stream(coreApi.connect_get_namespaced_pod_exec,
-                      "kafka-client",
-                      namespace,
-                      command=topics_deletion_command,
-                      stderr=True, stdin=False,
-                      stdout=True, tty=False)
-        print(resp)
-
-        print('Wait for topic deletion')
-        time.sleep(2)
-        resp = stream(coreApi.connect_get_namespaced_pod_exec,
-                      "kafka-client",
-                      namespace,
-                      command=num_topics_command,
-                      stderr=True, stdin=False,
-                      stdout=True, tty=False)
-        if resp == '0':
-            print('Topics deleted')
-            break
-    return
-
-
-def reset_zookeeper():
-    """Delete ZooKeeper configurations used for workload generation.
-    """
-    print('Delete ZooKeeper configurations used for workload generation')
-
-    delete_zoo_data_command = [
-        '/bin/sh',
-        '-c',
-        'zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall '
-        + '/workload-generation'
-    ]
-
-    check_zoo_data_command = [
-        '/bin/sh',
-        '-c',
-        'zookeeper-shell my-confluent-cp-zookeeper:2181 get '
-        + '/workload-generation'
-    ]
-
-    # Wait for configuration deletion
-    while True:
-        # Delete Zookeeper configuration data
-        resp = stream(coreApi.connect_get_namespaced_pod_exec,
-                      "zookeeper-client",
-                      namespace,
-                      command=delete_zoo_data_command,
-                      stderr=True, stdin=False,
-                      stdout=True, tty=False)
-        logging.debug(resp)
-
-        # Check data is deleted
-        client = stream(coreApi.connect_get_namespaced_pod_exec,
-                        "zookeeper-client",
-                        namespace,
-                        command=check_zoo_data_command,
-                        stderr=True, stdin=False,
-                        stdout=True, tty=False,
-                        _preload_content=False)  # Get client for returncode
-        client.run_forever(timeout=60)  # Start the client
-
-        if client.returncode == 1:  # Means data not available anymore
-            print('ZooKeeper reset was successful.')
-            break
-        else:
-            print('ZooKeeper reset was not successful. Retrying in 5s.')
-            time.sleep(5)
-    return
-
-
-def stop_lag_exporter():
-    """
-    Stop the lag exporter in order to reset it and allow smooth execution of
-    the next use cases.
-    """
-    print('Stop the lag exporter')
-
-    try:
-        # Get lag exporter
-        pod_list = coreApi.list_namespaced_pod(
-            namespace=namespace, label_selector='app.kubernetes.io/name=kafka-lag-exporter')
-        lag_exporter_pod = pod_list.items[0].metadata.name
-
-        # Delete lag exporter pod
-        res = coreApi.delete_namespaced_pod(
-            name=lag_exporter_pod, namespace=namespace)
-    except client.rest.ApiException as e:
-        logging.error('Exception while stopping lag exporter')
-        logging.error(e)
-        return
-
-    print('Deleted lag exporter pod: ' + lag_exporter_pod)
-    return
-
-
-def reset_cluster(wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics):
-    """
-    Stop the applications, delete topics, reset zookeeper and stop lag exporter.
-    """
-    print('Reset cluster')
-    stop_applications(wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy)
-    print('---------------------')
-    delete_topics(topics)
-    print('---------------------')
-    reset_zookeeper()
-    print('---------------------')
-    stop_lag_exporter()
-
-
-def main(exp_id, uc_id, dim_value, instances, partitions, cpu_limit, memory_limit, execution_minutes, prometheus_base_url, reset, ns, result_path, configurations, reset_only=False):
-    """
-    Main method to execute the benchmark once for a given use case.
-    Start workload generator/application -> execute -> analyse -> stop all
-    :param string exp_id: The ID of the executed experiment.
-    :param string uc_id: Use case to execute
-    :param int dim_value: Dimension value for load generator.
-    :param int instances: Number of instances for application.
-    :param int partitions: Number of partitions the kafka topics should have.
-    :param string cpu_limit: Max CPU utilization for the application.
-    :param string memory_limit: Max memory utilization for the application.
-    :param int execution_minutes: How long to execute the benchmark.
-    :param string prometheus_base_url: The base URL of the Prometheus instance to query.
-    :param boolean reset: Flag for resetting the cluster before execution.
-    :param string ns: The Kubernetes namespace to deploy to.
-    :param string result_path: The path to store the results in.
-    :param dict configurations: Key-value pairs for setting env variables of the UC.
-    :param boolean reset_only: Flag to only reset the application.
-    """
-    global namespace
-    namespace = ns
-    wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy = load_yaml_files()
-    print('---------------------')
-
-    initialize_kubernetes_api()
-    print('---------------------')
-
-    topics = [('input', partitions),
-              ('output', partitions),
-              ('aggregation-feedback', partitions),
-              ('configuration', 1)]
-
-    # Check for reset options
-    if reset_only:
-        # Only reset the cluster and then end the program
-        reset_cluster(wg_svc, wg, app_svc, app_svc_monitor,
-                      app_jmx, app_deploy, topics)
-        sys.exit()
-    if reset:
-        # Reset cluster before execution
-        print('Reset cluster before execution')
-        reset_cluster(wg_svc, wg, app_svc, app_svc_monitor,
-                      app_jmx, app_deploy, topics)
-        print('---------------------')
-
-    # Register the reset operation so that it is executed if the program aborts
-    atexit.register(reset_cluster, wg_svc, wg, app_svc,
-                    app_svc_monitor, app_jmx, app_deploy, topics)
-
-    create_topics(topics)
-    print('---------------------')
-
-    wg_svc, wg = start_workload_generator(wg_svc, wg, dim_value, uc_id)
-    print('---------------------')
-
-    app_svc, app_svc_monitor, app_jmx, app_deploy = start_application(
-        app_svc,
-        app_svc_monitor,
-        app_jmx,
-        app_deploy,
-        instances,
-        uc_id,
-        memory_limit,
-        cpu_limit,
-        configurations)
-    print('---------------------')
-
-    wait_execution(execution_minutes)
-    print('---------------------')
-
-    run_evaluation(exp_id, uc_id, dim_value, instances,
-                   execution_minutes, prometheus_base_url, result_path)
-    print('---------------------')
-
-    # Reset the cluster regularly; the abort handler is therefore no longer needed
-    reset_cluster(wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics)
-    atexit.unregister(reset_cluster)
-
-
-if __name__ == '__main__':
-    logging.basicConfig(level=logging.INFO)
-    args = load_variables()
-    print('---------------------')
-    main(args.exp_id, args.uc, args.load, args.instances, args.partitions,
-         args.cpu_limit, args.memory_limit, args.duration, args.prometheus,
-         args.reset, args.namespace, args.path, args.configurations,
-         args.reset_only)
diff --git a/execution/strategies/__init__.py b/execution/strategies/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/execution/strategies/config.py b/execution/strategies/config.py
deleted file mode 100644
index d4df97c18ae54c7c181ddf08264c013f9447350f..0000000000000000000000000000000000000000
--- a/execution/strategies/config.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from dataclasses import dataclass
-
-@dataclass
-class ExperimentConfig:
-    """ Wrapper for the configuration of an experiment. """
-    use_case: str
-    exp_id: int
-    dim_values: list
-    replicass: list
-    partitions: int
-    cpu_limit: str
-    memory_limit: str
-    execution_minutes: int
-    prometheus_base_url: str
-    reset: bool
-    namespace: str
-    result_path: str
-    configurations: dict
-    domain_restriction_strategy: object
-    search_strategy: object
-    threshold: int
-    subexperiment_executor: object
-    subexperiment_evaluator: object
diff --git a/execution/strategies/experiment_execution.py b/execution/strategies/experiment_execution.py
deleted file mode 100644
index c2ee18f9b79a6e880dbcb69b47061cc5ecc6b9ba..0000000000000000000000000000000000000000
--- a/execution/strategies/experiment_execution.py
+++ /dev/null
@@ -1,6 +0,0 @@
-class ExperimentExecutor:
-    def __init__(self, config):
-        self.config = config
-
-    def execute(self):
-        self.config.domain_restriction_strategy.execute(self.config)
diff --git a/execution/strategies/strategies/config.py b/execution/strategies/strategies/config.py
deleted file mode 100644
index 5c31f8c97a4085931cdfa1fa017d4e5909e21915..0000000000000000000000000000000000000000
--- a/execution/strategies/strategies/config.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from dataclasses import dataclass
-
-@dataclass
-class SubexperimentConfig:
-    """ Wrapper for the configuration of a subexperiment """
-    use_case: str
-    exp_id: int
-    counter: int
-    dim_value: int
-    replicas: int
-    partitions: int
-    cpu_limit: str
-    memory_limit: str
-    execution_minutes: int
-    prometheus_base_url: str
-    reset: bool
-    namespace: str
-    result_path: str
-    configurations: dict
diff --git a/execution/strategies/strategies/domain_restriction/lower_bound_strategy.py b/execution/strategies/strategies/domain_restriction/lower_bound_strategy.py
deleted file mode 100644
index b218731fc76d83347b4dbf10448f01615d378c0b..0000000000000000000000000000000000000000
--- a/execution/strategies/strategies/domain_restriction/lower_bound_strategy.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# The lower bound strategy
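-# For each dimension value, the search is started at the lower bound found for
-# the previous (smaller) dimension value, assuming a higher load never requires
-# fewer instances.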
-def execute(config):
-    dim_value_index = 0
-    lower_bound_replicas_index = 0
-    subexperiment_counter = 0
-    while dim_value_index < len(config.dim_values) and lower_bound_replicas_index >= 0 and lower_bound_replicas_index < len(config.replicass):
-        lower_bound_replicas_index, subexperiment_counter = config.search_strategy.execute(
-            config=config,
-            dim_value_index=dim_value_index,
-            lower_replicas_bound_index=lower_bound_replicas_index,
-            subexperiment_counter=subexperiment_counter)
-        dim_value_index+=1
\ No newline at end of file
diff --git a/execution/strategies/strategies/domain_restriction/no_lower_bound_strategy.py b/execution/strategies/strategies/domain_restriction/no_lower_bound_strategy.py
deleted file mode 100644
index e5dea56118460b0dfdc6b1c36ce2587b6752512b..0000000000000000000000000000000000000000
--- a/execution/strategies/strategies/domain_restriction/no_lower_bound_strategy.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# The strategy where the domain contains all amounts of instances
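-# Every dimension value is searched over the full range of replica counts,
-# i.e., no lower bound from previous results is applied.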
-def execute(config):
-    dim_value_index = 0
-    subexperiment_counter = 0
-    while dim_value_index < len(config.dim_values):
-        _, subexperiment_counter = config.search_strategy.execute(
-            config=config,
-            dim_value_index=dim_value_index,
-            lower_replicas_bound_index=0,
-            subexperiment_counter=subexperiment_counter)
-        dim_value_index+=1
\ No newline at end of file
diff --git a/execution/strategies/strategies/search/binary_search_strategy.py b/execution/strategies/strategies/search/binary_search_strategy.py
deleted file mode 100644
index 46748cbda250597b3a7644522126268be4599293..0000000000000000000000000000000000000000
--- a/execution/strategies/strategies/search/binary_search_strategy.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# The binary search strategy
-import os
-from strategies.strategies.config import SubexperimentConfig
-
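-# Assumes monotonicity: if an experiment with n replicas is successful, runs
-# with more replicas are assumed to be successful as well. The search returns
-# the index of the minimal sufficient number of replicas (or len(replicass) if
-# none suffices) together with the updated subexperiment counter.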
-def binary_search(config, dim_value, lower, upper, subexperiment_counter):
-    if lower == upper:
-        print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[lower]}")
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[lower], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
-        config.subexperiment_executor.execute(subexperiment_config)
-        success = config.subexperiment_evaluator.execute(subexperiment_config,
-                                                         config.threshold)
-        if success: # successful, the upper neighbor is assumed to also have been successful
-            return (lower, subexperiment_counter+1)
-        else: # not successful
-            return (lower+1, subexperiment_counter)
-    elif lower+1==upper:
-        print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[lower]}")
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[lower], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
-        config.subexperiment_executor.execute(subexperiment_config)
-        success = config.subexperiment_evaluator.execute(subexperiment_config,
-                                                         config.threshold)
-        if success: # minimal instances found
-            return (lower, subexperiment_counter)
-        else: # not successful, check if lower+1 instances are sufficient
-            print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[upper]}")
-            subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[upper], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
-            config.subexperiment_executor.execute(subexperiment_config)
-            success = config.subexperiment_evaluator.execute(subexperiment_config,
-                                                             config.threshold)
-            if success: # minimal instances found
-                return (upper, subexperiment_counter)
-            else:
-                return (upper+1, subexperiment_counter)
-    else:
-        # test mid
-        mid=(upper+lower)//2
-        print(f"Run subexperiment {subexperiment_counter} with config {dim_value} {config.replicass[mid]}")
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, config.replicass[mid], config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
-        config.subexperiment_executor.execute(subexperiment_config)
-        success = config.subexperiment_evaluator.execute(subexperiment_config,
-                                                         config.threshold)
-        if success: # success -> search in (lower, mid-1)
-            return binary_search(config, dim_value, lower, mid-1, subexperiment_counter+1)
-        else: # not success -> search in (mid+1, upper)
-            return binary_search(config, dim_value, mid+1, upper, subexperiment_counter+1)
-
-def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_counter):
-    upper = len(config.replicass)-1
-    dim_value=config.dim_values[dim_value_index]
-    return binary_search(config, dim_value, lower_replicas_bound_index, upper, subexperiment_counter)
diff --git a/execution/strategies/strategies/search/check_all_strategy.py b/execution/strategies/strategies/search/check_all_strategy.py
deleted file mode 100644
index 0861945113b829fa79317d8a1a6312b4d6e4f71d..0000000000000000000000000000000000000000
--- a/execution/strategies/strategies/search/check_all_strategy.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# The check_all strategy
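-# Executes a subexperiment for every combination of the current dimension value
-# and all replica counts starting at the given lower bound, and returns the
-# smallest replica index that was successful as the new lower bound.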
-import os
-from strategies.strategies.config import SubexperimentConfig
-
-
-def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_counter):
-    new_lower_replicas_bound_index = lower_replicas_bound_index
-    new_lower_replicas_bound_found = False
-    subexperiments_total = len(config.dim_values) * len(config.replicass)
-    while lower_replicas_bound_index < len(config.replicass):
-        subexperiment_counter += 1
-        dim_value = config.dim_values[dim_value_index]
-        replicas = config.replicass[lower_replicas_bound_index]
-        print(
-            f"Run subexperiment {subexperiment_counter} of {subexperiments_total} with dimension value {dim_value} and {replicas} replicas.")
-
-        subexperiment_config = SubexperimentConfig(
-            config.use_case, config.exp_id, subexperiment_counter, dim_value,
-            replicas, config.partitions, config.cpu_limit, config.memory_limit,
-            config.execution_minutes, config.prometheus_base_url, config.reset,
-            config.namespace, config.result_path, config.configurations)
-
-        config.subexperiment_executor.execute(subexperiment_config)
-
-        success = config.subexperiment_evaluator.execute(subexperiment_config,
-                                                         config.threshold)
-        if success and not new_lower_replicas_bound_found:
-            new_lower_replicas_bound_found = True
-            new_lower_replicas_bound_index = lower_replicas_bound_index
-        lower_replicas_bound_index += 1
-    return (new_lower_replicas_bound_index, subexperiment_counter)
diff --git a/execution/strategies/strategies/search/linear_search_strategy.py b/execution/strategies/strategies/search/linear_search_strategy.py
deleted file mode 100644
index 8e777303742e54cf2a11a1bde60e95b8aa85489d..0000000000000000000000000000000000000000
--- a/execution/strategies/strategies/search/linear_search_strategy.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# The linear-search strategy
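-# Increases the number of replicas one step at a time, starting at the given
-# lower bound, and stops at the first configuration that can handle the load.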
-
-import os
-from strategies.strategies.config import SubexperimentConfig
-
-def execute(config, dim_value_index, lower_replicas_bound_index, subexperiment_counter):
-    subexperiments_total=len(config.dim_values)+len(config.replicass)-1
-    dim_value=config.dim_values[dim_value_index]
-    while lower_replicas_bound_index < len(config.replicass):
-        subexperiment_counter+=1
-        replicas=config.replicass[lower_replicas_bound_index]
-        print(f"Run subexperiment {subexperiment_counter} from at most {subexperiments_total} with dimension value {dim_value} and {replicas} replicas.")
-
-        subexperiment_config = SubexperimentConfig(config.use_case, config.exp_id, subexperiment_counter, dim_value, replicas, config.partitions, config.cpu_limit, config.memory_limit, config.execution_minutes, config.prometheus_base_url, config.reset, config.namespace, config.result_path, config.configurations)
-
-        config.subexperiment_executor.execute(subexperiment_config)
-        success = config.subexperiment_evaluator.execute(subexperiment_config,
-                                                         config.threshold)
-        if success:
-            return (lower_replicas_bound_index, subexperiment_counter)
-        else:
-            lower_replicas_bound_index+=1
-    return (lower_replicas_bound_index, subexperiment_counter)
diff --git a/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py b/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
deleted file mode 100644
index 30188de837746b76113ec635ca77fadc3a91cb92..0000000000000000000000000000000000000000
--- a/execution/strategies/subexperiment_evaluation/subexperiment_evaluator.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import lib.trend_slope_computer as trend_slope_computer
-import logging
-import os
-
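-# Warm-up period (in seconds) that is passed to the trend slope computation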
-WARMUP_SEC = 60
-
-def execute(config, threshold):
-    """
-    Check whether the trend slope of the total lag of the subexperiment
-    is below the threshold.
-
-    :param config: Configuration of the subexperiment.
-    :param threshold: The threshold the trend slope needs to stay below.
-    """
-    cwd = f'{os.getcwd()}/{config.result_path}'
-    file = f"exp{config.exp_id}_uc{config.use_case}_{config.dim_value}_{config.replicas}_totallag.csv"
-
-    try:
-        trend_slope = trend_slope_computer.compute(cwd, file, WARMUP_SEC)
-    except Exception as e:
-        err_msg = 'Computing trend slope failed'
-        print(err_msg)
-        logging.exception(err_msg)
-        print('Mark this subexperiment as not successful and continue benchmark')
-        return False
-
-    print(f"Trend Slope: {trend_slope}")
-
-    return trend_slope < threshold
diff --git a/execution/strategies/subexperiment_execution/subexperiment_executor.py b/execution/strategies/subexperiment_execution/subexperiment_executor.py
deleted file mode 100644
index 6931dacfc72081cbe112c4d6d1003703ba42c526..0000000000000000000000000000000000000000
--- a/execution/strategies/subexperiment_execution/subexperiment_executor.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Wrapper that makes the execution method of a subexperiment interchangeable.
-
-import os
-import run_uc
-
-def execute(subexperiment_config):
-    run_uc.main(
-        exp_id=subexperiment_config.exp_id,
-        uc_id=subexperiment_config.use_case,
-        dim_value=int(subexperiment_config.dim_value),
-        instances=int(subexperiment_config.replicas),
-        partitions=subexperiment_config.partitions,
-        cpu_limit=subexperiment_config.cpu_limit,
-        memory_limit=subexperiment_config.memory_limit,
-        execution_minutes=int(subexperiment_config.execution_minutes),
-        prometheus_base_url=subexperiment_config.prometheus_base_url,
-        reset=subexperiment_config.reset,
-        ns=subexperiment_config.namespace,
-        result_path=subexperiment_config.result_path,
-        configurations=subexperiment_config.configurations)
diff --git a/execution/strategies/tests/.gitignore b/execution/strategies/tests/.gitignore
deleted file mode 100644
index 1998c294f84ec0ff4b32396e4cd8e74e352672e6..0000000000000000000000000000000000000000
--- a/execution/strategies/tests/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.cache
\ No newline at end of file
diff --git a/execution/strategies/tests/__init__.py b/execution/strategies/tests/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/execution/strategies/tests/test_domain_restriction_binary_search_strategy.py b/execution/strategies/tests/test_domain_restriction_binary_search_strategy.py
deleted file mode 100644
index d93d4924cf09015c714604f2fc995e1db971e69d..0000000000000000000000000000000000000000
--- a/execution/strategies/tests/test_domain_restriction_binary_search_strategy.py
+++ /dev/null
@@ -1,105 +0,0 @@
-import pprint
-
-from strategies.config import ExperimentConfig
-import strategies.strategies.search.binary_search_strategy as binary_search_strategy
-import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
-from strategies.experiment_execution import ExperimentExecutor
-import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
-
-class Object(object):
-    pass
-
-pp = pprint.PrettyPrinter(indent=4)
-
-dim_values = [0, 1, 2, 3, 4, 5, 6]
-replicass = [0, 1, 2, 3, 4, 5, 6]
-
-# True means the experiment was successful
-# The experiments are indexed row-wise (dimension values) and column-wise (number of replicas), each from 0 to 6, like ordinary arrays.
-# This means the first row starts with (0,0), the second row with (1,0), etc.
-successful = [
-       [ True , True , True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, False, True , True , True , True  ],
-       [ False, False, False, False, True , True , True  ],
-       [ False, False, False, False, False, False, True  ],
-       [ False, False, False, False, False, False, False ] 
-    ]
-
-expected_order = [
-        (0,3), # workload dim 0
-        (0,1), 
-        (0,0),
-        (1,3), # workload dim 1
-        (1,1),
-        (1,2),
-        (2,4), # workload dim 2
-        (2,2),
-        (3,4), # workload dim 3
-        (3,2),
-        (3,3),
-        (4,4), # workload dim 4
-        (4,3),
-        (5,5), # workload dim 5
-        (5,6),
-        (6,6) # workload dim 6
-    ]
-
-last_experiment = (0, 0)
-experiment_counter = -1
-subexperiment_executor = Object()
-
-def subexperiment_executor_executor(config):
-    global experiment_counter, last_experiment, pp
-    print("Simulate subexperiment with config:")
-    pp.pprint(config)
-    last_experiment = (config.dim_value, config.replicas)
-    experiment_counter += 1
-    print("Simulation complete")
-
-subexperiment_executor.execute = subexperiment_executor_executor
-
-
-# returns True if the experiment was successful
-
-subexperiment_evaluator = Object()
-
-def subexperiment_evaluator_execute(i):
-    print("Evaluating last experiment. Index was:")
-    global expected_order, experiment_counter, last_experiment, successful
-    pp.pprint(last_experiment)
-    print("Index was expected to be:")
-    pp.pprint(expected_order[experiment_counter])
-    assert expected_order[experiment_counter] == last_experiment
-    print("Index was as expected. Evaluation finished.")
-    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
-
-subexperiment_evaluator.execute = subexperiment_evaluator_execute
-
-def test_binary_search_strategy():
-    # declare parameters
-    uc="test-uc"
-    partitions=40
-    cpu_limit="1000m"
-    memory_limit="4Gi"
-    kafka_streams_commit_interval_ms=100
-    execution_minutes=5
-
-    # execute
-    experiment_config = ExperimentConfig(
-        exp_id="0",
-        use_case=uc,
-        dim_values=dim_values,
-        replicass=replicass,
-        partitions=partitions,
-        cpu_limit=cpu_limit,
-        memory_limit=memory_limit,
-        kafka_streams_commit_interval_ms=kafka_streams_commit_interval_ms,
-        execution_minutes=execution_minutes,
-        domain_restriction_strategy=lower_bound_strategy,
-        search_strategy=binary_search_strategy,
-        subexperiment_executor=subexperiment_executor,
-        subexperiment_evaluator=subexperiment_evaluator)
-    executor = ExperimentExecutor(experiment_config)
-    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_domain_restriction_check_all_strategy.py b/execution/strategies/tests/test_domain_restriction_check_all_strategy.py
deleted file mode 100644
index c15daca6ebab3171f0995c048afe56c0185efe56..0000000000000000000000000000000000000000
--- a/execution/strategies/tests/test_domain_restriction_check_all_strategy.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import pprint
-
-from strategies.config import ExperimentConfig
-import strategies.strategies.search.check_all_strategy as check_all_strategy
-import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
-from strategies.experiment_execution import ExperimentExecutor
-import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
-
-class Object(object):
-    pass
-
-pp = pprint.PrettyPrinter(indent=4)
-
-dim_values = [0, 1, 2, 3, 4, 5, 6]
-replicass = [0, 1, 2, 3, 4, 5, 6]
-
-# True means the experiment was successful
-# The experiments are indexed row-wise (dimension values) and column-wise (number of replicas), each from 0 to 6, like ordinary arrays.
-# This means the first row starts with (0,0), the second row with (1,0), etc.
-successful = [
-       [ True , True , True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, False, True , True , True , True  ],
-       [ False, False, False, False, True , True , True  ],
-       [ False, False, False, False, False, False, True  ],
-       [ False, False, False, False, False, False, False ] 
-    ]
-
-# the expected order of executed experiments
-expected_order = [
-        (0,0), # workload dim 0
-        (0,1),
-        (0,2),
-        (0,3),
-        (0,4),
-        (0,5),
-        (0,6),
-        (1,0), # workload dim 1
-        (1,1),
-        (1,2),
-        (1,3),
-        (1,4),
-        (1,5),
-        (1,6),
-        (2,2), # workload dim 2
-        (2,3),
-        (2,4),
-        (2,5),
-        (2,6),
-        (3,2), # workload dim 3
-        (3,3),
-        (3,4),
-        (3,5),
-        (3,6),
-        (4,3), # workload dim 4
-        (4,4),
-        (4,5),
-        (4,6),
-        (5,4), # workload dim 5
-        (5,5),
-        (5,6),
-        (6,6) # workload dim 6
-    ]
-
-last_experiment = (0, 0)
-experiment_counter = -1
-subexperiment_executor = Object()
-
-def subexperiment_executor_executor(config):
-    global experiment_counter, last_experiment, pp
-    print("Simulate subexperiment with config:")
-    pp.pprint(config)
-    last_experiment = (config.dim_value, config.replicas)
-    experiment_counter += 1
-    print("Simulation complete")
-
-subexperiment_executor.execute = subexperiment_executor_executor
-
-
-# returns True if the experiment was successful
-
-subexperiment_evaluator = Object()
-
-def subexperiment_evaluator_execute(i):
-    print("Evaluating last experiment. Index was:")
-    global expected_order, experiment_counter, last_experiment, successful
-    pp.pprint(expected_order[experiment_counter])
-    assert expected_order[experiment_counter] == last_experiment
-    print("Index was as expected. Evaluation finished.")
-    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
-
-subexperiment_evaluator.execute = subexperiment_evaluator_execute
-
-def test_check_all_strategy():
-    # declare parameters
-    uc="test-uc"
-    partitions=40
-    cpu_limit="1000m"
-    memory_limit="4Gi"
-    kafka_streams_commit_interval_ms=100
-    execution_minutes=5
-
-    # execute
-    experiment_config = ExperimentConfig(
-        exp_id="0",
-        use_case=uc,
-        dim_values=dim_values,
-        replicass=replicass,
-        partitions=partitions,
-        cpu_limit=cpu_limit,
-        memory_limit=memory_limit,
-        kafka_streams_commit_interval_ms=kafka_streams_commit_interval_ms,
-        execution_minutes=execution_minutes,
-        domain_restriction_strategy=lower_bound_strategy,
-        search_strategy=check_all_strategy,
-        subexperiment_executor=subexperiment_executor,
-        subexperiment_evaluator=subexperiment_evaluator)
-    executor = ExperimentExecutor(experiment_config)
-    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_domain_restriction_linear_search_strategy.py b/execution/strategies/tests/test_domain_restriction_linear_search_strategy.py
deleted file mode 100644
index 86e2cd29d187cb83166102c503ee79e5e1424573..0000000000000000000000000000000000000000
--- a/execution/strategies/tests/test_domain_restriction_linear_search_strategy.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import pprint
-
-from strategies.config import ExperimentConfig
-import strategies.strategies.search.linear_search_strategy as linear_search_strategy
-import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
-from strategies.experiment_execution import ExperimentExecutor
-import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
-
-class Object(object):
-    pass
-
-pp = pprint.PrettyPrinter(indent=4)
-
-dim_values = [0, 1, 2, 3, 4, 5, 6]
-replicass = [0, 1, 2, 3, 4, 5, 6]
-
-# True means the experiment was successful
-# The experiments are indexed row-wise (dimension values) and column-wise (number of replicas), each from 0 to 6, like ordinary arrays.
-# This means the first row starts with (0,0), the second row with (1,0), etc.
-successful = [
-       [ True , True , True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, False, True , True , True , True  ],
-       [ False, False, False, False, True , True , True  ],
-       [ False, False, False, False, False, False, True  ],
-       [ False, False, False, False, False, False, False ] 
-    ]
-
-# the expected order of executed experiments
-expected_order = [
-        (0,0),
-        (1,0),
-        (1,1),
-        (1,2),
-        (2,2),
-        (3,2),
-        (3,3),
-        (4,3),
-        (4,4),
-        (5,4),
-        (5,5),
-        (5,6),
-        (6,6)
-    ]
-
-last_experiment = (0, 0)
-experiment_counter = -1
-subexperiment_executor = Object()
-
-def subexperiment_executor_executor(config):
-    global experiment_counter, last_experiment, pp
-    print("Simulate subexperiment with config:")
-    pp.pprint(config)
-    last_experiment = (config.dim_value, config.replicas)
-    experiment_counter += 1
-    print("Simulation complete")
-
-subexperiment_executor.execute = subexperiment_executor_executor
-
-
-# returns True if the experiment was successful
-
-subexperiment_evaluator = Object()
-
-def subexperiment_evaluator_execute(i):
-    print("Evaluating last experiment. Index was:")
-    global expected_order, experiment_counter, last_experiment, successful
-    pp.pprint(expected_order[experiment_counter])
-    assert expected_order[experiment_counter] == last_experiment
-    print("Index was as expected. Evaluation finished.")
-    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
-
-subexperiment_evaluator.execute = subexperiment_evaluator_execute
-
-def test_linear_search_strategy():
-    # declare parameters
-    uc="test-uc"
-    partitions=40
-    cpu_limit="1000m"
-    memory_limit="4Gi"
-    kafka_streams_commit_interval_ms=100
-    execution_minutes=5
-
-    # execute
-    experiment_config = ExperimentConfig(
-        exp_id="0",
-        use_case=uc,
-        dim_values=dim_values,
-        replicass=replicass,
-        partitions=partitions,
-        cpu_limit=cpu_limit,
-        memory_limit=memory_limit,
-        kafka_streams_commit_interval_ms=kafka_streams_commit_interval_ms,
-        execution_minutes=execution_minutes,
-        domain_restriction_strategy=lower_bound_strategy,
-        search_strategy=linear_search_strategy,
-        subexperiment_executor=subexperiment_executor,
-        subexperiment_evaluator=subexperiment_evaluator)
-    executor = ExperimentExecutor(experiment_config)
-    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_no_restriction_binary_search_strategy.py b/execution/strategies/tests/test_no_restriction_binary_search_strategy.py
deleted file mode 100644
index 4f5da89cc72edd792015763539c9af4677772a79..0000000000000000000000000000000000000000
--- a/execution/strategies/tests/test_no_restriction_binary_search_strategy.py
+++ /dev/null
@@ -1,110 +0,0 @@
-import pprint
-
-from strategies.config import ExperimentConfig
-import strategies.strategies.search.binary_search_strategy as binary_search_strategy
-import strategies.strategies.domain_restriction.no_lower_bound_strategy as no_lower_bound_strategy
-from strategies.experiment_execution import ExperimentExecutor
-import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
-
-class Object(object):
-    pass
-
-pp = pprint.PrettyPrinter(indent=4)
-
-dim_values = [0, 1, 2, 3, 4, 5, 6]
-replicass = [0, 1, 2, 3, 4, 5, 6]
-
-# True means the experiment was successful
-# The experiments are indexed row-wise (dimension values) and column-wise (number of replicas), each from 0 to 6, like ordinary arrays.
-# This means the first row starts with (0,0), the second row with (1,0), etc.
-successful = [
-       [ True , True , True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, False, True , True , True , True  ],
-       [ False, False, False, False, True , True , True  ],
-       [ False, False, False, False, False, False, True  ],
-       [ False, False, False, False, False, False, False ] 
-    ]
-
-expected_order = [
-        (0,3), # workload dim 0
-        (0,1), 
-        (0,0),
-        (1,3), # workload dim 1
-        (1,1),
-        (1,2),
-        (2,3), # workload dim 2
-        (2,1),
-        (2,2),
-        (3,3), # workload dim 3
-        (3,1),
-        (3,2),
-        (4,3), # workload dim 4
-        (4,5),
-        (4,4),
-        (5,3), # workload dim 5
-        (5,5),
-        (5,6),
-        (6,3), # workload dim 6
-        (6,5),
-        (6,6)
-    ]
-
-last_experiment = (0, 0)
-experiment_counter = -1
-subexperiment_executor = Object()
-
-def subexperiment_executor_executor(config):
-    global experiment_counter, last_experiment, pp
-    print("Simulate subexperiment with config:")
-    pp.pprint(config)
-    last_experiment = (config.dim_value, config.replicas)
-    experiment_counter += 1
-    print("Simulation complete")
-
-subexperiment_executor.execute = subexperiment_executor_executor
-
-
-# returns True if the experiment was successful
-
-subexperiment_evaluator = Object()
-
-def subexperiment_evaluator_execute(i):
-    print("Evaluating last experiment. Index was:")
-    global expected_order, experiment_counter, last_experiment, successful
-    pp.pprint(last_experiment)
-    print("Index was expected to be:")
-    pp.pprint(expected_order[experiment_counter])
-    assert expected_order[experiment_counter] == last_experiment
-    print("Index was as expected. Evaluation finished.")
-    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
-
-subexperiment_evaluator.execute = subexperiment_evaluator_execute
-
-def test_binary_search_strategy():
-    # declare parameters
-    uc="test-uc"
-    partitions=40
-    cpu_limit="1000m"
-    memory_limit="4Gi"
-    kafka_streams_commit_interval_ms=100
-    execution_minutes=5
-
-    # execute
-    experiment_config = ExperimentConfig(
-        exp_id="0",
-        use_case=uc,
-        dim_values=dim_values,
-        replicass=replicass,
-        partitions=partitions,
-        cpu_limit=cpu_limit,
-        memory_limit=memory_limit,
-        kafka_streams_commit_interval_ms=kafka_streams_commit_interval_ms,
-        execution_minutes=execution_minutes,
-        domain_restriction_strategy=no_lower_bound_strategy,
-        search_strategy=binary_search_strategy,
-        subexperiment_executor=subexperiment_executor,
-        subexperiment_evaluator=subexperiment_evaluator)
-    executor = ExperimentExecutor(experiment_config)
-    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_no_restriction_check_all_strategy.py b/execution/strategies/tests/test_no_restriction_check_all_strategy.py
deleted file mode 100644
index f173a3d168704cc7a499933984b6510ebda2751e..0000000000000000000000000000000000000000
--- a/execution/strategies/tests/test_no_restriction_check_all_strategy.py
+++ /dev/null
@@ -1,137 +0,0 @@
-import pprint
-
-from strategies.config import ExperimentConfig
-import strategies.strategies.search.check_all_strategy as check_all_strategy
-import strategies.strategies.domain_restriction.no_lower_bound_strategy as no_lower_bound_strategy
-from strategies.experiment_execution import ExperimentExecutor
-import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
-
-class Object(object):
-    pass
-
-pp = pprint.PrettyPrinter(indent=4)
-
-dim_values = [0, 1, 2, 3, 4, 5, 6]
-replicass = [0, 1, 2, 3, 4, 5, 6]
-
-# True means the experiment was successful
-# The experiments are indexed row-wise (dimension values) and column-wise (number of replicas), each from 0 to 6, like ordinary arrays.
-# This means the first row starts with (0,0), the second row with (1,0), etc.
-successful = [
-       [ True , True , True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, False, True , True , True , True  ],
-       [ False, False, False, False, True , True , True  ],
-       [ False, False, False, False, False, False, True  ],
-       [ False, False, False, False, False, False, False ] 
-    ]
-
-# the expected order of executed experiments
-expected_order = [
-        (0,0), # workload dim 0
-        (0,1),
-        (0,2),
-        (0,3),
-        (0,4),
-        (0,5),
-        (0,6),
-        (1,0), # workload dim 1
-        (1,1),
-        (1,2),
-        (1,3),
-        (1,4),
-        (1,5),
-        (1,6),
-        (2,0), # workload dim 2
-        (2,1),
-        (2,2), 
-        (2,3),
-        (2,4),
-        (2,5),
-        (2,6),
-        (3,0), # workload dim 3
-        (3,1),
-        (3,2), 
-        (3,3),
-        (3,4),
-        (3,5),
-        (3,6),
-        (4,0), # workload dim 4
-        (4,1),
-        (4,2), 
-        (4,3),
-        (4,4),
-        (4,5),
-        (4,6),
-        (5,0), # workload dim 5
-        (5,1),
-        (5,2), 
-        (5,3),
-        (5,4),
-        (5,5),
-        (5,6),
-        (6,0), # workload dim 6
-        (6,1),
-        (6,2), 
-        (6,3),
-        (6,4),
-        (6,5),
-        (6,6),
-    ]
-
-last_experiment = (0, 0)
-experiment_counter = -1
-subexperiment_executor = Object()
-
-def subexperiment_executor_executor(config):
-    global experiment_counter, last_experiment, pp
-    print("Simulate subexperiment with config:")
-    pp.pprint(config)
-    last_experiment = (config.dim_value, config.replicas)
-    experiment_counter += 1
-    print("Simulation complete")
-
-subexperiment_executor.execute = subexperiment_executor_executor
-
-
-# returns True if the experiment was successful
-
-subexperiment_evaluator = Object()
-
-def subexperiment_evaluator_execute(i):
-    print("Evaluating last experiment. Index was:")
-    global expected_order, experiment_counter, last_experiment, successful
-    pp.pprint(expected_order[experiment_counter])
-    assert expected_order[experiment_counter] == last_experiment
-    print("Index was as expected. Evaluation finished.")
-    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
-
-subexperiment_evaluator.execute = subexperiment_evaluator_execute
-
-def test_linear_search_strategy():
-    # declare parameters
-    uc="test-uc"
-    partitions=40
-    cpu_limit="1000m"
-    memory_limit="4Gi"
-    kafka_streams_commit_interval_ms=100
-    execution_minutes=5
-
-    # execute
-    experiment_config = ExperimentConfig(
-        exp_id="0",
-        use_case=uc,
-        dim_values=dim_values,
-        replicass=replicass,
-        partitions=partitions,
-        cpu_limit=cpu_limit,
-        memory_limit=memory_limit,
-        kafka_streams_commit_interval_ms=kafka_streams_commit_interval_ms,
-        execution_minutes=execution_minutes,
-        domain_restriction_strategy=no_lower_bound_strategy,
-        search_strategy=check_all_strategy,
-        subexperiment_executor=subexperiment_executor,
-        subexperiment_evaluator=subexperiment_evaluator)
-    executor = ExperimentExecutor(experiment_config)
-    executor.execute()
\ No newline at end of file
diff --git a/execution/strategies/tests/test_no_restriction_linear_search_strategy.py b/execution/strategies/tests/test_no_restriction_linear_search_strategy.py
deleted file mode 100644
index 0e47c2e95b75ae682e82a02ad3d0a91c5a62f253..0000000000000000000000000000000000000000
--- a/execution/strategies/tests/test_no_restriction_linear_search_strategy.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import pprint
-
-from strategies.config import ExperimentConfig
-import strategies.strategies.search.linear_search_strategy as linear_search_strategy
-import strategies.strategies.domain_restriction.no_lower_bound_strategy as no_lower_bound_strategy
-from strategies.experiment_execution import ExperimentExecutor
-import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
-
-class Object(object):
-    pass
-
-pp = pprint.PrettyPrinter(indent=4)
-
-dim_values = [0, 1, 2, 3, 4, 5, 6]
-replicass = [0, 1, 2, 3, 4, 5, 6]
-
-# True means the experiment was successful
-# the experiments are indexed row (representing dimension values) and column (representing number of replicas) wise as usual arrays from 0 - 6 respectively.
-# this means the first row starts with (0,0), the second row with (1, 0) etc.
-successful = [
-       [ True , True , True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, True , True , True , True , True  ],
-       [ False, False, False, True , True , True , True  ],
-       [ False, False, False, False, True , True , True  ],
-       [ False, False, False, False, False, False, True  ],
-       [ False, False, False, False, False, False, False ] 
-    ]
-
-# the expected order of executed experiments
-expected_order = [
-        (0,0), # workload dim 0
-        (1,0), # workload dim 1
-        (1,1),
-        (1,2),
-        (2,0), # workload dim 2
-        (2,1),
-        (2,2),
-        (3,0), # workload dim 3
-        (3,1),
-        (3,2),
-        (3,3),
-        (4,0), # workload dim 4
-        (4,1),
-        (4,2),
-        (4,3),
-        (4,4),
-        (5,0), # workload dim 5
-        (5,1),
-        (5,2),
-        (5,3),
-        (5,4),
-        (5,5),
-        (5,6),
-        (6,0), # workload dim 6
-        (6,1),
-        (6,2),
-        (6,3),
-        (6,4),
-        (6,5),
-        (6,6)
-    ]
-
-last_experiment = (0, 0)
-experiment_counter = -1
-subexperiment_executor = Object()
-
-def subexperiment_executor_executor(config):
-    global experiment_counter, last_experiment, pp
-    print("Simulate subexperiment with config:")
-    pp.pprint(config)
-    last_experiment = (config.dim_value, config.replicas)
-    experiment_counter += 1
-    print("Simulation complete")
-
-subexperiment_executor.execute = subexperiment_executor_executor
-
-
-# returns True if the experiment was successful
-
-subexperiment_evaluator = Object()
-
-def subexperiment_evaluator_execute(i):
-    print("Evaluating last experiment. Index was:")
-    global expected_order, experiment_counter, last_experiment, successful
-    pp.pprint(expected_order[experiment_counter])
-    assert expected_order[experiment_counter] == last_experiment
-    print("Index was as expected. Evaluation finished.")
-    return 1 if successful[last_experiment[0]][last_experiment[1]] else 0
-
-subexperiment_evaluator.execute = subexperiment_evaluator_execute
-
-def test_linear_search_strategy():
-    # declare parameters
-    uc="test-uc"
-    partitions=40
-    cpu_limit="1000m"
-    memory_limit="4Gi"
-    kafka_streams_commit_interval_ms=100
-    execution_minutes=5
-
-    # execute
-    experiment_config = ExperimentConfig(
-        exp_id="0",
-        use_case=uc,
-        dim_values=dim_values,
-        replicass=replicass,
-        partitions=partitions,
-        cpu_limit=cpu_limit,
-        memory_limit=memory_limit,
-        kafka_streams_commit_interval_ms=kafka_streams_commit_interval_ms,
-        execution_minutes=execution_minutes,
-        domain_restriction_strategy=no_lower_bound_strategy,
-        search_strategy=linear_search_strategy,
-        subexperiment_executor=subexperiment_executor,
-        subexperiment_evaluator=subexperiment_evaluator)
-    executor = ExperimentExecutor(experiment_config)
-    executor.execute()
\ No newline at end of file
diff --git a/execution/theodolite.py b/execution/theodolite.py
deleted file mode 100755
index bd273c4405e2a406b5b5537e084957625c19aa96..0000000000000000000000000000000000000000
--- a/execution/theodolite.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-from lib.cli_parser import benchmark_parser
-import logging  # logging
-import os
-import run_uc
-import sys
-from strategies.config import ExperimentConfig
-import strategies.strategies.domain_restriction.lower_bound_strategy as lower_bound_strategy
-import strategies.strategies.domain_restriction.no_lower_bound_strategy as no_lower_bound_strategy
-import strategies.strategies.search.check_all_strategy as check_all_strategy
-import strategies.strategies.search.linear_search_strategy as linear_search_strategy
-import strategies.strategies.search.binary_search_strategy as binary_search_strategy
-from strategies.experiment_execution import ExperimentExecutor
-import strategies.subexperiment_execution.subexperiment_executor as subexperiment_executor
-import strategies.subexperiment_evaluation.subexperiment_evaluator as subexperiment_evaluator
-
-
-def load_variables():
-    """Load the CLI variables given at the command line"""
-    print('Load CLI variables')
-    parser = benchmark_parser("Run theodolite benchmarking")
-    args = parser.parse_args()
-    print(args)
-    if (args.uc is None or args.loads is None or args.instances_list is None) and not args.reset_only:
-        print('The options --uc, --loads and --instances are mandatory.')
-        print('Some might not be set!')
-        sys.exit(1)
-    return args
-
-
-def main(uc, loads, instances_list, partitions, cpu_limit, memory_limit,
-         duration, domain_restriction, search_strategy, threshold,
-         prometheus_base_url, reset, namespace, result_path, configurations):
-
-    print(
-        f"Domain restriction of search space activated: {domain_restriction}")
-    print(f"Chosen search strategy: {search_strategy}")
-
-    counter_path = f"{result_path}/exp_counter.txt"
-
-    if os.path.exists(counter_path):
-        with open(counter_path, mode="r") as read_stream:
-            exp_id = int(read_stream.read())
-    else:
-        exp_id = 0
-        # Create the directory if not exists
-        os.makedirs(result_path, exist_ok=True)
-
-    # Store metadata
-    separator = ","
-    lines = [
-        f'UC={uc}\n',
-        f'DIM_VALUES={separator.join(map(str, loads))}\n',
-        f'REPLICAS={separator.join(map(str, instances_list))}\n',
-        f'PARTITIONS={partitions}\n',
-        f'CPU_LIMIT={cpu_limit}\n',
-        f'MEMORY_LIMIT={memory_limit}\n',
-        f'EXECUTION_MINUTES={duration}\n',
-        f'DOMAIN_RESTRICTION={domain_restriction}\n',
-        f'SEARCH_STRATEGY={search_strategy}\n',
-        f'CONFIGURATIONS={configurations}'
-    ]
-    with open(f"{result_path}/exp{exp_id}_uc{uc}_meta.txt", "w") as stream:
-        stream.writelines(lines)
-
-    with open(counter_path, mode="w") as write_stream:
-        write_stream.write(str(exp_id + 1))
-
-    domain_restriction_strategy = None
-    search_strategy_method = None
-
-    # Select domain restriction
-    if domain_restriction:
-        # domain restriction
-        domain_restriction_strategy = lower_bound_strategy
-    else:
-        # no domain restriction
-        domain_restriction_strategy = no_lower_bound_strategy
-
-    # select search strategy
-    if search_strategy == "linear-search":
-        print(
-            f"Going to execute at most {len(loads)+len(instances_list)-1} subexperiments in total..")
-        search_strategy_method = linear_search_strategy
-    elif search_strategy == "binary-search":
-        search_strategy_method = binary_search_strategy
-    else:
-        print(
-            f"Going to execute {len(loads)*len(instances_list)} subexperiments in total..")
-        search_strategy_method = check_all_strategy
-
-    experiment_config = ExperimentConfig(
-        use_case=uc,
-        exp_id=exp_id,
-        dim_values=loads,
-        replicass=instances_list,
-        partitions=partitions,
-        cpu_limit=cpu_limit,
-        memory_limit=memory_limit,
-        execution_minutes=duration,
-        prometheus_base_url=prometheus_base_url,
-        reset=reset,
-        namespace=namespace,
-        configurations=configurations,
-        result_path=result_path,
-        domain_restriction_strategy=domain_restriction_strategy,
-        search_strategy=search_strategy_method,
-        threshold=threshold,
-        subexperiment_executor=subexperiment_executor,
-        subexperiment_evaluator=subexperiment_evaluator)
-
-    executor = ExperimentExecutor(experiment_config)
-    executor.execute()
-
-
-if __name__ == '__main__':
-    logging.basicConfig(level=logging.INFO)
-    args = load_variables()
-    if args.reset_only:
-        print('Only reset the cluster')
-        run_uc.main(None, None, None, None, None, None, None, None, None,
-                    None, args.namespace, None, None, reset_only=True)
-    else:
-        main(args.uc, args.loads, args.instances_list, args.partitions,
-             args.cpu_limit, args.memory_limit, args.duration,
-             args.domain_restriction, args.search_strategy,
-             args.threshold, args.prometheus, args.reset, args.namespace,
-             args.path, args.configurations)
diff --git a/execution/theodolite.yaml b/execution/theodolite.yaml
index 06d14a0f589b2ac7a16ebaaae4d1490b840ea57b..ff8eecb312d052eab6f2e66a0bd57d8a983d38e1 100644
--- a/execution/theodolite.yaml
+++ b/execution/theodolite.yaml
@@ -5,47 +5,60 @@ metadata:
 spec:
   template:
     spec:
-      volumes:
-      - name: theodolite-pv-storage
-        persistentVolumeClaim:
-          claimName: theodolite-pv-claim
+      securityContext:
+        runAsUser: 0 # Run as root to grant write access to the mounted volumes.
       containers:
+        - name: lag-analysis
+          image: ghcr.io/cau-se/theodolite-slo-checker-lag-trend:theodolite-kotlin-latest
+          ports:
+          - containerPort: 80
+            name: analysis
         - name: theodolite
-          image: ghcr.io/cau-se/theodolite:latest
-          # imagePullPolicy: Never # Used to pull "own" local image
+          image: ghcr.io/cau-se/theodolite:theodolite-kotlin-latest
+          imagePullPolicy: Always
           env:
-            - name: UC # mandatory
-              value: "1"
-            - name: LOADS # mandatory
-              value: "100000, 200000"
-            - name: INSTANCES # mandatory
-              value: "1, 2, 3"
-            # - name: DURATION
-            #   value: "5"
-            # - name: PARTITIONS
-            #   value: "40"
-            # - name: DOMAIN_RESTRICTION
-            #   value: "True"
-            # - name: SEARCH_STRATEGY
-            #   value: "linear-search"
-            # - name: CPU_LIMIT
-            #   value: "1000m"
-            # - name: MEMORY_LIMIT
-            #   value: "4Gi"
-            - name: PROMETHEUS_BASE_URL
-              value: "http://prometheus-operated:9090"
-            # - name: NAMESPACE
-            #   value: "default"
-            # - name: CONFIGURATIONS
-            #   value: "COMMIT_INTERVAL_MS=100, NUM_STREAM_THREADS=1"
-            - name: RESULT_PATH
-              value: "results"
-            - name: PYTHONUNBUFFERED # Enable logs in Kubernetes
-              value: "1"
+            - name: NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+
+            # - name: MODE
+            #   value: yaml-executor # Default is `yaml-executor`
+            - name: THEODOLITE_EXECUTION
+              value: /etc/execution/example-execution-yaml-resource.yaml # The name of this file must correspond to the filename of the execution from which the config map is created.
+            - name: THEODOLITE_BENCHMARK
+              value: /etc/benchmark/example-benchmark-yaml-resource.yaml # The name of this file must correspond to the filename of the benchmark from which the config map is created.
+            - name: THEODOLITE_APP_RESOURCES
+              value: /etc/app-resources
+            - name: RESULTS_FOLDER # Folder for saving results
+              value: results # Default is the pwd (/deployments)
+            # - name: CREATE_RESULTS_FOLDER # Specifies whether the result folder should be created if it does not exist.
+            #   value: "false" # Default is false.
           volumeMounts:
-            - mountPath: "/app/results"
+            - mountPath: "/deployments/results" # the mounted path must corresponds to the value of `RESULT_FOLDER`.
               name: theodolite-pv-storage
+            - mountPath: "/etc/app-resources" # must be corresponds to the value of `THEODOLITE_APP_RESOURCES`.
+              name: app-resources
+            - mountPath: "/etc/benchmark"  # must be corresponds to the value of `THEODOLITE_BENCHMARK`.
+              name: benchmark
+            - mountPath: "/etc/execution" # must be corresponds to the value of `THEODOLITE_EXECUTION`.
+              name: execution
       restartPolicy: Never
       # Uncomment if RBAC is enabled and configured
-      # serviceAccountName: theodolite
-  backoffLimit: 4
+      serviceAccountName: theodolite
+      # Multiple volumes are needed to provide the corresponding files.
+      # The names must correspond to the created configmaps and the volumeMounts.
+      volumes:
+        - name: theodolite-pv-storage
+          persistentVolumeClaim:
+            claimName: theodolite-pv-claim
+        - name: app-resources
+          configMap:
+            name: app-resources-configmap
+        - name: benchmark
+          configMap:
+            name: benchmark-configmap
+        - name: execution
+          configMap:
+            name: execution-configmap
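+        # A sketch of how the referenced config maps could be created from the files
+        # mentioned above (assumed file and folder names, adjust them to your setup):
+        #   kubectl create configmap app-resources-configmap --from-file=./app-resources/
+        #   kubectl create configmap benchmark-configmap --from-file=./example-benchmark-yaml-resource.yaml
+        #   kubectl create configmap execution-configmap --from-file=./example-execution-yaml-resource.yaml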
+  backoffLimit: 4
\ No newline at end of file
diff --git a/execution/uc-application/aggregation-deployment.yaml b/execution/uc-application/aggregation-deployment.yaml
deleted file mode 100644
index 07732ca1dd1e6b2b06f098dfb10a53d38e8d5cae..0000000000000000000000000000000000000000
--- a/execution/uc-application/aggregation-deployment.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-aggregation
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-aggregation
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-aggregation
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: uc-application
-        image: uc-app:latest
-        ports:
-        - containerPort: 5555
-          name: jmx
-        env:
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
-        - name: JAVA_OPTS
-          value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
-        - name: COMMIT_INTERVAL_MS # Set as default for the applications
-          value: "100"
-        resources:
-          limits:
-            memory: 4Gi
-            cpu: 1000m
-      - name: prometheus-jmx-exporter
-        image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
-        command:
-          - java
-          - -XX:+UnlockExperimentalVMOptions
-          - -XX:+UseCGroupMemoryLimitForHeap
-          - -XX:MaxRAMFraction=1
-          - -XshowSettings:vm
-          - -jar
-          - jmx_prometheus_httpserver.jar
-          - "5556"
-          - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
-        ports:
-          - containerPort: 5556
-        volumeMounts:
-          - name: jmx-config
-            mountPath: /etc/jmx-aggregation
-      volumes:
-        - name: jmx-config
-          configMap:
-            name: aggregation-jmx-configmap
diff --git a/execution/uc-workload-generator/workloadGenerator.yaml b/execution/uc-workload-generator/workloadGenerator.yaml
deleted file mode 100644
index 146e285f66d4c0e1a88d613e4ac2d5571234fad6..0000000000000000000000000000000000000000
--- a/execution/uc-workload-generator/workloadGenerator.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: titan-ccp-load-generator
-spec:
-  selector:
-    matchLabels:
-      app: titan-ccp-load-generator
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        app: titan-ccp-load-generator
-    spec:
-      terminationGracePeriodSeconds: 0
-      containers:
-      - name: workload-generator
-        image: workload-generator:latest
-        ports:
-        - containerPort: 5701
-          name: coordination
-        env:
-        # Order need to be preserved for run_uc.py
-        - name: NUM_SENSORS
-          value: "25000"
-        - name: NUM_NESTED_GROUPS
-          value: "5"
-        - name: KUBERNETES_NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        - name: KUBERNETES_DNS_NAME
-          value: "titan-ccp-load-generator.$(KUBERNETES_NAMESPACE).svc.cluster.local"
-        - name: KAFKA_BOOTSTRAP_SERVERS
-          value: "my-confluent-cp-kafka:9092"
-        - name: SCHEMA_REGISTRY_URL
-          value: "http://my-confluent-cp-schema-registry:8081"
diff --git a/slope-evaluator/Dockerfile b/slope-evaluator/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..032b8153a6989ca04631ba553289dacb3620a38d
--- /dev/null
+++ b/slope-evaluator/Dockerfile
@@ -0,0 +1,6 @@
+FROM tiangolo/uvicorn-gunicorn-fastapi:python3.7
+
+COPY requirements.txt requirements.txt
+RUN pip install -r requirements.txt
+
+COPY ./app /app
\ No newline at end of file
diff --git a/slope-evaluator/README.md b/slope-evaluator/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..cd9e6820ed46452ce44d57d0c7e5cd5ae05e5a3b
--- /dev/null
+++ b/slope-evaluator/README.md
@@ -0,0 +1,61 @@
+# Lag Trend SLO Evaluator
+
+## Execution
+
+For development:
+
+```sh
+uvicorn main:app --reload # run this command inside the app/ folder
+```
+
+## Docker
+
+Build the Docker image:
+
+```sh
+docker build . -t theodolite-evaluator
+```
+
+Run the Docker image:
+
+```sh
+docker run -p 80:80 theodolite-evaluator
+```
+
+## Configuration
+
+You can set the `HOST` and the `PORT` (and many more parameters) via environment variables. The default is `0.0.0.0:80`.
+For more information see the [Gunicorn/FastAPI Docker docs](https://github.com/tiangolo/uvicorn-gunicorn-fastapi-docker#advanced-usage).
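+
+For example, to expose the evaluator on a different port, the `PORT` variable can be overridden when starting the container (a sketch, assuming the port handling documented for the base image):
+
+```sh
+docker run -e PORT=8080 -p 8080:8080 theodolite-evaluator
+```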
+
+## API Documentation
+
+The running webserver provides a REST API with the following route:
+
+* /evaluate-slope
+    * Method: POST
+    * Body:
+        * total_lags
+        * threshold
+        * warmup
+
+The body of the request must be a JSON string that satisfies the following conditions (an example request is sketched after this list):
+
+* **total_lags**: A list of result sets, one per experiment repetition. Each result set is based on the [Range Vector type](https://www.prometheus.io/docs/prometheus/latest/querying/api/#range-vectors) from Prometheus and must have the following JSON structure:
+    ```
+        [
+            {
+                "metric": {
+                    "group": "<label_value>"
+                },
+                "values": [
+                    [
+                        <unix_timestamp>,
+                        "<sample_value>"
+                    ]
+                ]
+            }
+        ]
+    ```
+    * The `<label_value>` provided in "metric.group" must be equal to the id of the Kafka consumer group.
+    * The `<unix_timestamp>` provided as the first element of each entry in the "values" array must be the timestamp of the measurement value in seconds (with optional decimal precision).
+    * The `<sample_value>` must be the measurement value as a string.
+* **threshold**: Must be an unsigned integer that specifies the threshold for the SLO evaluation. The SLO is considered fulfilled if the result value is below the threshold; if the result value is equal to or above the threshold, the SLO is considered not fulfilled.
+* **warmup**: Specifies the warmup time in seconds that is ignored when evaluating the SLO.
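+
+The following is a minimal example request. It assumes the service is running locally with the port mapping from the `docker run` command above and that the command is issued from the `slope-evaluator` directory, so the bundled test data can be used as the request body:
+
+```sh
+curl -X POST http://localhost:80/evaluate-slope \
+  -H "Content-Type: application/json" \
+  -d @resources/test-1-rep-success.json
+```
+
+The response body is a single JSON boolean that indicates whether the SLO is considered fulfilled.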
\ No newline at end of file
diff --git a/slope-evaluator/app/main.py b/slope-evaluator/app/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f6788f0ca84b7710be5b509ca4f0641047e963d
--- /dev/null
+++ b/slope-evaluator/app/main.py
@@ -0,0 +1,55 @@
+from fastapi import FastAPI, Request
+import trend_slope_computer
+import logging
+import os
+import pandas as pd
+import json
+import sys
+from statistics import median
+
+app = FastAPI()
+
+logging.basicConfig(stream=sys.stdout,
+                    format="%(asctime)s %(levelname)s %(name)s: %(message)s")
+logger = logging.getLogger("API")
+
+
+if os.getenv('LOG_LEVEL') == 'INFO':
+    logger.setLevel(logging.INFO)
+elif os.getenv('LOG_LEVEL') == 'WARNING':
+    logger.setLevel(logging.WARNING)
+elif os.getenv('LOG_LEVEL') == 'DEBUG':
+    logger.setLevel(logging.DEBUG)
+
+def calculate_slope_trend(results, warmup):
+    d = []
+    for result in results:
+        group = result['metric']['group']
+        for value in result['values']:
+            d.append({'group': group, 'timestamp': int(
+                value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
+
+    df = pd.DataFrame(d)
+
+    logger.info("Calculating trend slope with warmup of %s seconds for data frame:\n %s", warmup, df)
+    try:
+        trend_slope = trend_slope_computer.compute(df, warmup)
+    except Exception as e:
+        err_msg = 'Computing trend slope failed.'
+        logger.exception(err_msg)
+        logger.error('Mark this subexperiment as not successful and continue benchmark.')
+        return False
+
+    logger.info("Computed lag trend slope is '%s'", trend_slope)
+    return trend_slope
+
+def check_service_level_objective(results, threshold):
+    return median(results) < threshold
+
+@app.post("/evaluate-slope", response_model=bool)
+async def evaluate_slope(request: Request):
+    data = json.loads(await request.body())
+    results = [calculate_slope_trend(total_lag, data['warmup']) for total_lag in data['total_lags']]
+    return check_service_level_objective(results=results, threshold=data["threshold"])
+
+logger.info("SLO evaluator is online")
\ No newline at end of file
diff --git a/slope-evaluator/app/test.py b/slope-evaluator/app/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b165ea479bb9a552edaba7692df4fd4ef3f4ab4
--- /dev/null
+++ b/slope-evaluator/app/test.py
@@ -0,0 +1,30 @@
+import unittest
+from main import app, check_service_level_objective
+import json
+from fastapi.testclient import TestClient
+
+class TestSloEvaluation(unittest.TestCase):
+    client = TestClient(app)
+
+    def test_1_rep(self):
+        with open('../resources/test-1-rep-success.json') as json_file:
+            data = json.load(json_file)
+            response = self.client.post("/evaluate-slope", json=data)
+            self.assertEqual(response.json(), True)
+
+    def test_3_rep(self):
+        with open('../resources/test-3-rep-success.json') as json_file:
+            data = json.load(json_file)
+            response = self.client.post("/evaluate-slope", json=data)
+            self.assertEqual(response.json(), True)
+
+    def test_check_service_level_objective(self):
+        values = [1, 2, 3, 4]  # median is 2.5
+        self.assertEqual(check_service_level_objective(values, 2), False)
+        self.assertEqual(check_service_level_objective(values, 3), True)
+        values = [1, 2, 3, 4, 5]  # median is 3
+        self.assertEqual(check_service_level_objective(values, 2), False)
+        self.assertEqual(check_service_level_objective(values, 4), True)
+
+if __name__ == '__main__':
+    unittest.main()
\ No newline at end of file
diff --git a/execution/lib/trend_slope_computer.py b/slope-evaluator/app/trend_slope_computer.py
similarity index 52%
rename from execution/lib/trend_slope_computer.py
rename to slope-evaluator/app/trend_slope_computer.py
index 90ae26cfd275f53307e19532f047e5e0a9326d3a..51b28f2baa5110a6d64f3adc1ac9a94c6b6f3ce9 100644
--- a/execution/lib/trend_slope_computer.py
+++ b/slope-evaluator/app/trend_slope_computer.py
@@ -2,14 +2,12 @@ from sklearn.linear_model import LinearRegression
 import pandas as pd
 import os
 
-def compute(directory, filename, warmup_sec):
-    df = pd.read_csv(os.path.join(directory, filename))
-    input = df
-    input['sec_start'] = input.loc[0:, 'timestamp'] - input.iloc[0]['timestamp']
-    regress = input.loc[input['sec_start'] >= warmup_sec] # Warm-Up
+def compute(data, warmup_sec):
+    data['sec_start'] = data.loc[0:, 'timestamp'] - data.iloc[0]['timestamp']
+    regress = data.loc[data['sec_start'] >= warmup_sec] # Warm-Up
 
-    X = regress.iloc[:, 2].values.reshape(-1, 1)  # values converts it into a numpy array
-    Y = regress.iloc[:, 3].values.reshape(-1, 1)  # -1 means that calculate the dimension of rows, but have 1 column
+    X = regress.iloc[:, 1].values.reshape(-1, 1)  # .values converts the column into a NumPy array
+    Y = regress.iloc[:, 2].values.reshape(-1, 1)  # -1 lets NumPy infer the number of rows, with one column
     linear_regressor = LinearRegression()  # create object for the class
     linear_regressor.fit(X, Y)  # perform linear regression
     Y_pred = linear_regressor.predict(X)  # make predictions
diff --git a/slope-evaluator/requirements.txt b/slope-evaluator/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..670815f35b18361951a2fa7b2142eee6bc86b01d
--- /dev/null
+++ b/slope-evaluator/requirements.txt
@@ -0,0 +1,5 @@
+fastapi==0.55.1
+scikit-learn==0.20.3
+pandas==1.0.3
+uvicorn
+requests
diff --git a/slope-evaluator/resources/test-1-rep-success.json b/slope-evaluator/resources/test-1-rep-success.json
new file mode 100644
index 0000000000000000000000000000000000000000..9e315c707be7b2a874c58fcb1093aa86f7676560
--- /dev/null
+++ b/slope-evaluator/resources/test-1-rep-success.json
@@ -0,0 +1,139 @@
+{
+    "total_lags": [
+        [
+            {
+                "metric": {
+                    "group": "theodolite-uc1-application-0.0.1"
+                },
+                "values": [
+                    [
+                        1.621008960827E9,
+                        "234"
+                    ],
+                    [
+                        1.621008965827E9,
+                        "234"
+                    ],
+                    [
+                        1.621008970827E9,
+                        "234"
+                    ],
+                    [
+                        1.621008975827E9,
+                        "719"
+                    ],
+                    [
+                        1.621008980827E9,
+                        "719"
+                    ],
+                    [
+                        1.621008985827E9,
+                        "719"
+                    ],
+                    [
+                        1.621008990827E9,
+                        "1026"
+                    ],
+                    [
+                        1.621008995827E9,
+                        "1026"
+                    ],
+                    [
+                        1.621009000827E9,
+                        "1026"
+                    ],
+                    [
+                        1.621009005827E9,
+                        "534"
+                    ],
+                    [
+                        1.621009010827E9,
+                        "534"
+                    ],
+                    [
+                        1.621009015827E9,
+                        "534"
+                    ],
+                    [
+                        1.621009020827E9,
+                        "943"
+                    ],
+                    [
+                        1.621009025827E9,
+                        "943"
+                    ],
+                    [
+                        1.621009030827E9,
+                        "943"
+                    ],
+                    [
+                        1.621009035827E9,
+                        "66"
+                    ],
+                    [
+                        1.621009040827E9,
+                        "66"
+                    ],
+                    [
+                        1.621009045827E9,
+                        "66"
+                    ],
+                    [
+                        1.621009050827E9,
+                        "841"
+                    ],
+                    [
+                        1.621009055827E9,
+                        "841"
+                    ],
+                    [
+                        1.621009060827E9,
+                        "841"
+                    ],
+                    [
+                        1.621009065827E9,
+                        "405"
+                    ],
+                    [
+                        1.621009070827E9,
+                        "405"
+                    ],
+                    [
+                        1.621009075827E9,
+                        "405"
+                    ],
+                    [
+                        1.621009080827E9,
+                        "201"
+                    ],
+                    [
+                        1.621009085827E9,
+                        "201"
+                    ],
+                    [
+                        1.621009090827E9,
+                        "201"
+                    ],
+                    [
+                        1.621009095827E9,
+                        "227"
+                    ],
+                    [
+                        1.621009100827E9,
+                        "227"
+                    ],
+                    [
+                        1.621009105827E9,
+                        "227"
+                    ],
+                    [
+                        1.621009110827E9,
+                        "943"
+                    ]
+                ]
+            }
+        ]
+    ],
+    "threshold": 2000,
+    "warmup": 0
+}
\ No newline at end of file
diff --git a/slope-evaluator/resources/test-3-rep-success.json b/slope-evaluator/resources/test-3-rep-success.json
new file mode 100644
index 0000000000000000000000000000000000000000..485966cba40f01e4a646e626914510ba49b707bc
--- /dev/null
+++ b/slope-evaluator/resources/test-3-rep-success.json
@@ -0,0 +1,289 @@
+{
+    "total_lags": [
+        [
+            {
+                "metric": {
+                    "group": "theodolite-uc1-application-0.0.1"
+                },
+                "values": [
+                    [
+                        1.621012384232E9,
+                        "6073"
+                    ],
+                    [
+                        1.621012389232E9,
+                        "6073"
+                    ],
+                    [
+                        1.621012394232E9,
+                        "6073"
+                    ],
+                    [
+                        1.621012399232E9,
+                        "227"
+                    ],
+                    [
+                        1.621012404232E9,
+                        "227"
+                    ],
+                    [
+                        1.621012409232E9,
+                        "227"
+                    ],
+                    [
+                        1.621012414232E9,
+                        "987"
+                    ],
+                    [
+                        1.621012419232E9,
+                        "987"
+                    ],
+                    [
+                        1.621012424232E9,
+                        "987"
+                    ],
+                    [
+                        1.621012429232E9,
+                        "100"
+                    ],
+                    [
+                        1.621012434232E9,
+                        "100"
+                    ],
+                    [
+                        1.621012439232E9,
+                        "100"
+                    ],
+                    [
+                        1.621012444232E9,
+                        "959"
+                    ],
+                    [
+                        1.621012449232E9,
+                        "959"
+                    ],
+                    [
+                        1.621012454232E9,
+                        "959"
+                    ],
+                    [
+                        1.621012459232E9,
+                        "625"
+                    ],
+                    [
+                        1.621012464232E9,
+                        "625"
+                    ],
+                    [
+                        1.621012469232E9,
+                        "625"
+                    ],
+                    [
+                        1.621012474232E9,
+                        "683"
+                    ],
+                    [
+                        1.621012479232E9,
+                        "683"
+                    ],
+                    [
+                        1.621012484232E9,
+                        "683"
+                    ],
+                    [
+                        1.621012489232E9,
+                        "156"
+                    ]
+                ]
+            }
+        ],
+        [
+            {
+                "metric": {
+                    "group": "theodolite-uc1-application-0.0.1"
+                },
+                "values": [
+                    [
+                        1.621012545211E9,
+                        "446"
+                    ],
+                    [
+                        1.621012550211E9,
+                        "446"
+                    ],
+                    [
+                        1.621012555211E9,
+                        "446"
+                    ],
+                    [
+                        1.621012560211E9,
+                        "801"
+                    ],
+                    [
+                        1.621012565211E9,
+                        "801"
+                    ],
+                    [
+                        1.621012570211E9,
+                        "801"
+                    ],
+                    [
+                        1.621012575211E9,
+                        "773"
+                    ],
+                    [
+                        1.621012580211E9,
+                        "773"
+                    ],
+                    [
+                        1.621012585211E9,
+                        "773"
+                    ],
+                    [
+                        1.621012590211E9,
+                        "509"
+                    ],
+                    [
+                        1.621012595211E9,
+                        "509"
+                    ],
+                    [
+                        1.621012600211E9,
+                        "509"
+                    ],
+                    [
+                        1.621012605211E9,
+                        "736"
+                    ],
+                    [
+                        1.621012610211E9,
+                        "736"
+                    ],
+                    [
+                        1.621012615211E9,
+                        "736"
+                    ],
+                    [
+                        1.621012620211E9,
+                        "903"
+                    ],
+                    [
+                        1.621012625211E9,
+                        "903"
+                    ],
+                    [
+                        1.621012630211E9,
+                        "903"
+                    ],
+                    [
+                        1.621012635211E9,
+                        "512"
+                    ],
+                    [
+                        1.621012640211E9,
+                        "512"
+                    ],
+                    [
+                        1.621012645211E9,
+                        "512"
+                    ]
+                ]
+            }
+        ],
+        [
+            {
+                "metric": {
+                    "group": "theodolite-uc1-application-0.0.1"
+                },
+                "values": [
+                    [
+                        1.621012700748E9,
+                        "6484"
+                    ],
+                    [
+                        1.621012705748E9,
+                        "6484"
+                    ],
+                    [
+                        1.621012710748E9,
+                        "6484"
+                    ],
+                    [
+                        1.621012715748E9,
+                        "505"
+                    ],
+                    [
+                        1.621012720748E9,
+                        "505"
+                    ],
+                    [
+                        1.621012725748E9,
+                        "505"
+                    ],
+                    [
+                        1.621012730748E9,
+                        "103"
+                    ],
+                    [
+                        1.621012735748E9,
+                        "103"
+                    ],
+                    [
+                        1.621012740748E9,
+                        "103"
+                    ],
+                    [
+                        1.621012745748E9,
+                        "201"
+                    ],
+                    [
+                        1.621012750748E9,
+                        "201"
+                    ],
+                    [
+                        1.621012755748E9,
+                        "201"
+                    ],
+                    [
+                        1.621012760748E9,
+                        "965"
+                    ],
+                    [
+                        1.621012765748E9,
+                        "965"
+                    ],
+                    [
+                        1.621012770748E9,
+                        "965"
+                    ],
+                    [
+                        1.621012775748E9,
+                        "876"
+                    ],
+                    [
+                        1.621012780748E9,
+                        "876"
+                    ],
+                    [
+                        1.621012785748E9,
+                        "876"
+                    ],
+                    [
+                        1.621012790748E9,
+                        "380"
+                    ],
+                    [
+                        1.621012795748E9,
+                        "380"
+                    ],
+                    [
+                        1.621012800748E9,
+                        "380"
+                    ]
+                ]
+            }
+        ]
+    ],
+    "threshold": 2000,
+    "warmup": 0
+}
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc1-flink/flink-configuration-configmap.yaml b/theodolite-benchmarks/definitions/uc1-flink/flink-configuration-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..36178e2bebdac96b8648bd6c299009aa49d3fff6
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/flink-configuration-configmap.yaml
@@ -0,0 +1,66 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: flink-config
+  labels:
+    app: flink
+data:
+  flink-conf.yaml: |+
+    jobmanager.rpc.address: flink-jobmanager
+    taskmanager.numberOfTaskSlots: 1 #TODO
+    #blob.server.port: 6124
+    #jobmanager.rpc.port: 6123
+    #taskmanager.rpc.port: 6122
+    #queryable-state.proxy.ports: 6125
+    #jobmanager.memory.process.size: 4Gb
+    #taskmanager.memory.process.size: 4Gb
+    #parallelism.default: 1 #TODO
+    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
+    metrics.reporter.prom.interval: 10 SECONDS
+    taskmanager.network.detailed-metrics: true
+  # -> gives metrics about inbound/outbound network queue lengths
+  log4j-console.properties: |+
+    # This affects logging for both user code and Flink
+    rootLogger.level = INFO
+    rootLogger.appenderRef.console.ref = ConsoleAppender
+    rootLogger.appenderRef.rolling.ref = RollingFileAppender
+
+    # Uncomment this if you want to _only_ change Flink's logging
+    #logger.flink.name = org.apache.flink
+    #logger.flink.level = INFO
+
+    # The following lines keep the log level of common libraries/connectors on
+    # log level INFO. The root logger does not override this. You have to manually
+    # change the log levels here.
+    logger.akka.name = akka
+    logger.akka.level = INFO
+    logger.kafka.name= org.apache.kafka
+    logger.kafka.level = INFO
+    logger.hadoop.name = org.apache.hadoop
+    logger.hadoop.level = INFO
+    logger.zookeeper.name = org.apache.zookeeper
+    logger.zookeeper.level = INFO
+
+    # Log all infos to the console
+    appender.console.name = ConsoleAppender
+    appender.console.type = CONSOLE
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+
+    # Log all infos in the given rolling file
+    appender.rolling.name = RollingFileAppender
+    appender.rolling.type = RollingFile
+    appender.rolling.append = false
+    appender.rolling.fileName = ${sys:log.file}
+    appender.rolling.filePattern = ${sys:log.file}.%i
+    appender.rolling.layout.type = PatternLayout
+    appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+    appender.rolling.policies.type = Policies
+    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+    appender.rolling.policies.size.size=100MB
+    appender.rolling.strategy.type = DefaultRolloverStrategy
+    appender.rolling.strategy.max = 10
+
+    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
+    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+    logger.netty.level = OFF
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc1-flink/jobmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc1-flink/jobmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..512d4fe3c786e1b2c44e6ec57fccadf41a2e2eeb
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/jobmanager-deployment.yaml
@@ -0,0 +1,93 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-jobmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: jobmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: jobmanager
+    spec:
+      containers:
+        - name: jobmanager
+          image: ghcr.io/cau-se/theodolite-uc1-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["standalone-job", "--job-classname", "theodolite.uc1.application.HistoryServiceFlinkJob"] # optional arguments: ["--job-id", "<job id>", "--fromSavepoint", "/path/to/savepoint", "--allowNonRestoredState"]
+          #command: ['sleep', '60m']
+          ports:
+            - containerPort: 6123
+              name: rpc
+            - containerPort: 6124
+              name: blob-server
+            - containerPort: 8081
+              name: webui
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6123
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf
+#            - name: job-artifacts-volume
+#              mountPath: /opt/flink/usrlib
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-jobmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
+#        - name: job-artifacts-volume
+#          hostPath:
+#            path: /host/path/to/job/artifacts
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc1-flink/jobmanager-rest-service.yaml b/theodolite-benchmarks/definitions/uc1-flink/jobmanager-rest-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d74aaf7f625c6922e2e1b4f20c19e50a39b68ac
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/jobmanager-rest-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager-rest
+spec:
+  type: NodePort
+  ports:
+    - name: rest
+      port: 8081
+      targetPort: 8081
+      nodePort: 30081
+  selector:
+    app: flink
+    component: jobmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc1-flink/jobmanager-service.yaml b/theodolite-benchmarks/definitions/uc1-flink/jobmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e2ff5d9898eb1ebf5db9a827472a47514ab1473c
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/jobmanager-service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: rpc
+      port: 6123
+    - name: blob-server
+      port: 6124
+    - name: webui
+      port: 8081
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: jobmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc1-flink/service-monitor.yaml b/theodolite-benchmarks/definitions/uc1-flink/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..02f78823c627e27ddfe1db5eac3f6a7f7a7f1bf8
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: flink
+    appScope: titan-ccp
+  name: flink
+spec:
+  selector:
+    matchLabels:
+      app: flink
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-benchmarks/definitions/uc1-flink/taskmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc1-flink/taskmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7d46554692696b194736df6023eed5686040497d
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/taskmanager-deployment.yaml
@@ -0,0 +1,87 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-taskmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: taskmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: taskmanager
+    spec:
+      containers:
+        - name: taskmanager
+          image: ghcr.io/cau-se/theodolite-uc1-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: TASK_MANAGER_NUMBER_OF_TASK_SLOTS
+              value: "1" #TODO
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["taskmanager"]
+          ports:
+            - containerPort: 6122
+              name: rpc
+            - containerPort: 6125
+              name: query-state
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6122
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf/
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-taskmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
diff --git a/theodolite-benchmarks/definitions/uc1-flink/taskmanager-service.yaml b/theodolite-benchmarks/definitions/uc1-flink/taskmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a2e27f64af1cfd1a26da142b8a50bb41c8ba5fcb
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-flink/taskmanager-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-taskmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: taskmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc1-kstreams/uc1-benchmark-operator.yaml b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-benchmark-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b80b572dfd30e9c056d3c01ba17cc662d70fc749
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-benchmark-operator.yaml
@@ -0,0 +1,38 @@
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: uc1-kstreams
+spec:
+  appResource:
+    - "uc1-kstreams-deployment.yaml"
+    - "uc1-kstreams-service.yaml"
+    - "uc1-jmx-configmap.yaml"
+    - "uc1-service-monitor.yaml"
+  loadGenResource:
+    - "uc1-load-generator-deployment.yaml"
+    - "uc1-load-generator-service.yaml"
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "uc1-kstreams-deployment.yaml"
+  loadTypes:
+    - typeName: "NumSensors"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc1-load-generator-deployment.yaml"
+          properties:
+            container: "workload-generator"
+            variableName: "NUM_SENSORS"
+        - type: "NumSensorsLoadGeneratorReplicaPatcher"
+          resource: "uc1-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "15000"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "theodolite-.*"
+        removeOnly: True
diff --git a/theodolite-benchmarks/definitions/uc1-kstreams/uc1-benchmark-standalone.yaml b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-benchmark-standalone.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..12cbd8ea310423d28e35de8185288b27257c15ec
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-benchmark-standalone.yaml
@@ -0,0 +1,31 @@
+name: "uc1-kstreams"
+appResource:
+  - "uc1-kstreams-deployment.yaml"
+  - "uc1-kstreams-service.yaml"
+  - "uc1-jmx-configmap.yaml"
+  - "uc1-service-monitor.yaml"
+loadGenResource:
+  - "uc1-load-generator-deployment.yaml"
+  - "uc1-load-generator-service.yaml"
+resourceTypes:
+  - typeName: "Instances"
+    patchers:
+      - type: "ReplicaPatcher"
+        resource: "uc1-kstreams-deployment.yaml"
+loadTypes:
+  - typeName: "NumSensors"
+    patchers:
+      - type: "EnvVarPatcher"
+        resource: "uc1-load-generator-deployment.yaml"
+        container: "workload-generator"
+        variableName: "NUM_SENSORS"
+      - type: NumSensorsLoadGeneratorReplicaPatcher
+        resource: "uc1-load-generator-deployment.yaml"
+        properties:
+          loadGenMaxRecords: "15000"
+kafkaConfig:
+  bootstrapServer: "theodolite-cp-kafka:9092"
+  topics:
+    - name: "input"
+      numPartitions: 40
+      replicationFactor: 1
\ No newline at end of file
diff --git a/execution/uc-application/jmx-configmap.yaml b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-jmx-configmap.yaml
similarity index 100%
rename from execution/uc-application/jmx-configmap.yaml
rename to theodolite-benchmarks/definitions/uc1-kstreams/uc1-jmx-configmap.yaml
diff --git a/theodolite-benchmarks/definitions/uc1-kstreams/uc1-kstreams-deployment.yaml b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-kstreams-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..171c3446db2719ee91bd8954233015316851fcf9
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-kstreams-deployment.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc1-kstreams/uc1-kstreams-service.yaml b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-kstreams-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85432d04f225c30469f3232153ef6bd72bd02bdf
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-kstreams-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:  
+  name: titan-ccp-aggregation
+  labels:
+    app: titan-ccp-aggregation
+spec:
+  #type: NodePort
+  selector:    
+    app: titan-ccp-aggregation
+  ports:  
+  - name: http
+    port: 80
+    targetPort: 80
+    protocol: TCP
+  - name: metrics
+    port: 5556
diff --git a/theodolite-benchmarks/definitions/uc1-kstreams/uc1-load-generator-deployment.yaml b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-load-generator-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9f9ccc6ae39407bb1f027e1e23cb152944b869e0
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-load-generator-deployment.yaml
@@ -0,0 +1,32 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-load-generator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: workload-generator
+          image: ghcr.io/cau-se/theodolite-uc1-workload-generator:latest
+          ports:
+            - containerPort: 5701
+              name: coordination
+          env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: KUBERNETES_DNS_NAME
+              value: "titan-ccp-load-generator.$(KUBERNETES_NAMESPACE).svc.cluster.local"
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
diff --git a/execution/uc-workload-generator/load-generator-service.yaml b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-load-generator-service.yaml
similarity index 71%
rename from execution/uc-workload-generator/load-generator-service.yaml
rename to theodolite-benchmarks/definitions/uc1-kstreams/uc1-load-generator-service.yaml
index c1299e373009dee5fa4cc87093ebc684c7f2e333..f8b26b3f6dece427f9c1ad4db94e351b042749b3 100644
--- a/execution/uc-workload-generator/load-generator-service.yaml
+++ b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-load-generator-service.yaml
@@ -10,7 +10,7 @@ spec:
   selector:
     app: titan-ccp-load-generator
   ports:
-  - name: coordination
-    port: 5701
-    targetPort: 5701
-    protocol: TCP
+    - name: coordination
+      port: 5701
+      targetPort: 5701
+      protocol: TCP
diff --git a/execution/uc-application/service-monitor.yaml b/theodolite-benchmarks/definitions/uc1-kstreams/uc1-service-monitor.yaml
similarity index 100%
rename from execution/uc-application/service-monitor.yaml
rename to theodolite-benchmarks/definitions/uc1-kstreams/uc1-service-monitor.yaml
diff --git a/theodolite-benchmarks/definitions/uc2-flink/flink-configuration-configmap.yaml b/theodolite-benchmarks/definitions/uc2-flink/flink-configuration-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..321541f6ac8715b8546b964d8ad2b7c28552fbcd
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/flink-configuration-configmap.yaml
@@ -0,0 +1,66 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: flink-config
+  labels:
+    app: flink
+data:
+  flink-conf.yaml: |+
+    #jobmanager.rpc.address: flink-jobmanager
+    #taskmanager.numberOfTaskSlots: 1 #TODO
+    #blob.server.port: 6124
+    #jobmanager.rpc.port: 6123
+    #taskmanager.rpc.port: 6122
+    #queryable-state.proxy.ports: 6125
+    #jobmanager.memory.process.size: 4Gb
+    #taskmanager.memory.process.size: 4Gb
+    #parallelism.default: 1 #TODO
+    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
+    metrics.reporter.prom.interval: 10 SECONDS
+    taskmanager.network.detailed-metrics: true
+  # -> gives metrics about inbound/outbound network queue lengths
+  log4j-console.properties: |+
+    # This affects logging for both user code and Flink
+    rootLogger.level = INFO
+    rootLogger.appenderRef.console.ref = ConsoleAppender
+    rootLogger.appenderRef.rolling.ref = RollingFileAppender
+
+    # Uncomment this if you want to _only_ change Flink's logging
+    #logger.flink.name = org.apache.flink
+    #logger.flink.level = INFO
+
+    # The following lines keep the log level of common libraries/connectors on
+    # log level INFO. The root logger does not override this. You have to manually
+    # change the log levels here.
+    logger.akka.name = akka
+    logger.akka.level = INFO
+    logger.kafka.name= org.apache.kafka
+    logger.kafka.level = INFO
+    logger.hadoop.name = org.apache.hadoop
+    logger.hadoop.level = INFO
+    logger.zookeeper.name = org.apache.zookeeper
+    logger.zookeeper.level = INFO
+
+    # Log all infos to the console
+    appender.console.name = ConsoleAppender
+    appender.console.type = CONSOLE
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+
+    # Log all infos in the given rolling file
+    appender.rolling.name = RollingFileAppender
+    appender.rolling.type = RollingFile
+    appender.rolling.append = false
+    appender.rolling.fileName = ${sys:log.file}
+    appender.rolling.filePattern = ${sys:log.file}.%i
+    appender.rolling.layout.type = PatternLayout
+    appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+    appender.rolling.policies.type = Policies
+    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+    appender.rolling.policies.size.size=100MB
+    appender.rolling.strategy.type = DefaultRolloverStrategy
+    appender.rolling.strategy.max = 10
+
+    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
+    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+    logger.netty.level = OFF
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc2-flink/jobmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc2-flink/jobmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cece4286d49a3f6ff139ca7f1e01c647acd5d9f3
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/jobmanager-deployment.yaml
@@ -0,0 +1,93 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-jobmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: jobmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: jobmanager
+    spec:
+      containers:
+        - name: jobmanager
+          image: ghcr.io/cau-se/theodolite-uc2-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["standalone-job", "--job-classname", "theodolite.uc2.application.HistoryServiceFlinkJob"] # optional arguments: ["--job-id", "<job id>", "--fromSavepoint", "/path/to/savepoint", "--allowNonRestoredState"]
+          #command: ['sleep', '60m']
+          ports:
+            - containerPort: 6123
+              name: rpc
+            - containerPort: 6124
+              name: blob-server
+            - containerPort: 8081
+              name: webui
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6123
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf
+#            - name: job-artifacts-volume
+#              mountPath: /opt/flink/usrlib
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-jobmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
+#        - name: job-artifacts-volume
+#          hostPath:
+#            path: /host/path/to/job/artifacts
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc2-flink/jobmanager-rest-service.yaml b/theodolite-benchmarks/definitions/uc2-flink/jobmanager-rest-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d74aaf7f625c6922e2e1b4f20c19e50a39b68ac
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/jobmanager-rest-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager-rest
+spec:
+  type: NodePort
+  ports:
+    - name: rest
+      port: 8081
+      targetPort: 8081
+      nodePort: 30081
+  selector:
+    app: flink
+    component: jobmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc2-flink/jobmanager-service.yaml b/theodolite-benchmarks/definitions/uc2-flink/jobmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e2ff5d9898eb1ebf5db9a827472a47514ab1473c
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/jobmanager-service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: rpc
+      port: 6123
+    - name: blob-server
+      port: 6124
+    - name: webui
+      port: 8081
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: jobmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc2-flink/service-monitor.yaml b/theodolite-benchmarks/definitions/uc2-flink/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..02f78823c627e27ddfe1db5eac3f6a7f7a7f1bf8
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: flink
+    appScope: titan-ccp
+  name: flink
+spec:
+  selector:
+    matchLabels:
+      app: flink
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-benchmarks/definitions/uc2-flink/taskmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc2-flink/taskmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c0a10f65aae92e4ac1fd8fb92bae97794c142232
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/taskmanager-deployment.yaml
@@ -0,0 +1,87 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-taskmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: taskmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: taskmanager
+    spec:
+      containers:
+        - name: taskmanager
+          image: ghcr.io/cau-se/theodolite-uc2-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: TASK_MANAGER_NUMBER_OF_TASK_SLOTS
+              value: "1" #TODO
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["taskmanager"]
+          ports:
+            - containerPort: 6122
+              name: rpc
+            - containerPort: 6125
+              name: query-state
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6122
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf/
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-taskmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
diff --git a/theodolite-benchmarks/definitions/uc2-flink/taskmanager-service.yaml b/theodolite-benchmarks/definitions/uc2-flink/taskmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a2e27f64af1cfd1a26da142b8a50bb41c8ba5fcb
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-flink/taskmanager-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-taskmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: taskmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc2-kstreams/uc2-benchmark-operator.yaml b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-benchmark-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b65bbdedb055c206c1ebcd7ab6a450318ee8c00f
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-benchmark-operator.yaml
@@ -0,0 +1,41 @@
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: uc2-kstreams
+spec:
+  appResource:
+    - "uc2-kstreams-deployment.yaml"
+    - "uc2-kstreams-service.yaml"
+    - "uc2-jmx-configmap.yaml"
+    - "uc2-service-monitor.yaml"
+  loadGenResource:
+    - "uc2-load-generator-deployment.yaml"
+    - "uc2-load-generator-service.yaml"
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "uc2-kstreams-deployment.yaml"
+  loadTypes:
+    - typeName: "NumSensors"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc2-load-generator-deployment.yaml"
+          properties:
+            container: "workload-generator"
+            variableName: "NUM_SENSORS"
+        - type: NumSensorsLoadGeneratorReplicaPatcher
+          resource: "uc2-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "15000"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "output"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "theodolite-.*"
+        removeOnly: True
diff --git a/theodolite-benchmarks/definitions/uc2-kstreams/uc2-benchmark-standalone.yaml b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-benchmark-standalone.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e38f83f5b05d05febb59c2f775a29b2d545acf0e
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-benchmark-standalone.yaml
@@ -0,0 +1,37 @@
+name: "uc2-kstreams"
+appResource:
+  - "uc2-kstreams-deployment.yaml"
+  - "uc2-kstreams-service.yaml"
+  - "uc2-jmx-configmap.yaml"
+  - "uc2-service-monitor.yaml"
+loadGenResource:
+  - "uc2-load-generator-deployment.yaml"
+  - "uc2-load-generator-service.yaml"
+resourceTypes:
+  - typeName: "Instances"
+    patchers:
+      - type: "ReplicaPatcher"
+        resource: "uc2-kstreams-deployment.yaml"
+loadTypes:
+  - typeName: "NumSensors"
+    patchers:
+      - type: "EnvVarPatcher"
+        resource: "uc2-load-generator-deployment.yaml"
+        properties:
+          container: "workload-generator"
+          variableName: "NUM_SENSORS"
+      - type: NumSensorsLoadGeneratorReplicaPatcher
+        resource: "uc2-load-generator-deployment.yaml"
+        properties:
+          loadGenMaxRecords: "15000"
+kafkaConfig:
+  bootstrapServer: "theodolite-cp-kafka:9092"
+  topics:
+    - name: "input"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "output"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "theodolite-.*"
+      removeOnly: True
diff --git a/theodolite-benchmarks/definitions/uc2-kstreams/uc2-jmx-configmap.yaml b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..78496a86b1242a89b9e844ead3e700fd0b9a9667
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-jmx-configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aggregation-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false
diff --git a/theodolite-benchmarks/definitions/uc2-kstreams/uc2-kstreams-deployment.yaml b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-kstreams-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e07bb3f9e536655712c06a004c5d1fb60ffa67e0
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-kstreams-deployment.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc2-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc2-kstreams/uc2-kstreams-service.yaml b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-kstreams-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85432d04f225c30469f3232153ef6bd72bd02bdf
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-kstreams-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:  
+  name: titan-ccp-aggregation
+  labels:
+    app: titan-ccp-aggregation
+spec:
+  #type: NodePort
+  selector:    
+    app: titan-ccp-aggregation
+  ports:  
+  - name: http
+    port: 80
+    targetPort: 80
+    protocol: TCP
+  - name: metrics
+    port: 5556
diff --git a/theodolite-benchmarks/definitions/uc2-kstreams/uc2-load-generator-deployment.yaml b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-load-generator-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dfc0af71543c15b12b5c850919feb0e0a4f52f28
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-load-generator-deployment.yaml
@@ -0,0 +1,32 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-load-generator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: workload-generator
+          image: ghcr.io/cau-se/theodolite-uc2-workload-generator:latest
+          ports:
+            - containerPort: 5701
+              name: coordination
+          env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: KUBERNETES_DNS_NAME
+              value: "titan-ccp-load-generator.$(KUBERNETES_NAMESPACE).svc.cluster.local"
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
diff --git a/theodolite-benchmarks/definitions/uc2-kstreams/uc2-load-generator-service.yaml b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-load-generator-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f8b26b3f6dece427f9c1ad4db94e351b042749b3
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-load-generator-service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-load-generator
+  labels:
+    app: titan-ccp-load-generator
+spec:
+  type: ClusterIP
+  clusterIP: None
+  selector:
+    app: titan-ccp-load-generator
+  ports:
+    - name: coordination
+      port: 5701
+      targetPort: 5701
+      protocol: TCP
diff --git a/theodolite-benchmarks/definitions/uc2-kstreams/uc2-service-monitor.yaml b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e7e758cacb5086305efa26292ddef2afc958096
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc2-kstreams/uc2-service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: titan-ccp-aggregation
+    appScope: titan-ccp
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-benchmarks/definitions/uc3-flink/flink-configuration-configmap.yaml b/theodolite-benchmarks/definitions/uc3-flink/flink-configuration-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..321541f6ac8715b8546b964d8ad2b7c28552fbcd
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/flink-configuration-configmap.yaml
@@ -0,0 +1,66 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: flink-config
+  labels:
+    app: flink
+data:
+  flink-conf.yaml: |+
+    #jobmanager.rpc.address: flink-jobmanager
+    #taskmanager.numberOfTaskSlots: 1 #TODO
+    #blob.server.port: 6124
+    #jobmanager.rpc.port: 6123
+    #taskmanager.rpc.port: 6122
+    #queryable-state.proxy.ports: 6125
+    #jobmanager.memory.process.size: 4Gb
+    #taskmanager.memory.process.size: 4Gb
+    #parallelism.default: 1 #TODO
+    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
+    metrics.reporter.prom.interval: 10 SECONDS
+    taskmanager.network.detailed-metrics: true
+  # -> gives metrics about inbound/outbound network queue lengths
+  log4j-console.properties: |+
+    # This affects logging for both user code and Flink
+    rootLogger.level = INFO
+    rootLogger.appenderRef.console.ref = ConsoleAppender
+    rootLogger.appenderRef.rolling.ref = RollingFileAppender
+
+    # Uncomment this if you want to _only_ change Flink's logging
+    #logger.flink.name = org.apache.flink
+    #logger.flink.level = INFO
+
+    # The following lines keep the log level of common libraries/connectors on
+    # log level INFO. The root logger does not override this. You have to manually
+    # change the log levels here.
+    logger.akka.name = akka
+    logger.akka.level = INFO
+    logger.kafka.name= org.apache.kafka
+    logger.kafka.level = INFO
+    logger.hadoop.name = org.apache.hadoop
+    logger.hadoop.level = INFO
+    logger.zookeeper.name = org.apache.zookeeper
+    logger.zookeeper.level = INFO
+
+    # Log all infos to the console
+    appender.console.name = ConsoleAppender
+    appender.console.type = CONSOLE
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+
+    # Log all infos in the given rolling file
+    appender.rolling.name = RollingFileAppender
+    appender.rolling.type = RollingFile
+    appender.rolling.append = false
+    appender.rolling.fileName = ${sys:log.file}
+    appender.rolling.filePattern = ${sys:log.file}.%i
+    appender.rolling.layout.type = PatternLayout
+    appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+    appender.rolling.policies.type = Policies
+    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+    appender.rolling.policies.size.size=100MB
+    appender.rolling.strategy.type = DefaultRolloverStrategy
+    appender.rolling.strategy.max = 10
+
+    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
+    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+    logger.netty.level = OFF
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc3-flink/jobmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc3-flink/jobmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..33bf1f1121a9764785db7a504799314a7ed40cf3
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/jobmanager-deployment.yaml
@@ -0,0 +1,93 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-jobmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: jobmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: jobmanager
+    spec:
+      containers:
+        - name: jobmanager
+          image: ghcr.io/cau-se/theodolite-uc3-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["standalone-job", "--job-classname", "theodolite.uc3.application.HistoryServiceFlinkJob"] # optional arguments: ["--job-id", "<job id>", "--fromSavepoint", "/path/to/savepoint", "--allowNonRestoredState"]
+          #command: ['sleep', '60m']
+          ports:
+            - containerPort: 6123
+              name: rpc
+            - containerPort: 6124
+              name: blob-server
+            - containerPort: 8081
+              name: webui
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6123
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf
+#            - name: job-artifacts-volume
+#              mountPath: /opt/flink/usrlib
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-jobmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
+#        - name: job-artifacts-volume
+#          hostPath:
+#            path: /host/path/to/job/artifacts
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc3-flink/jobmanager-rest-service.yaml b/theodolite-benchmarks/definitions/uc3-flink/jobmanager-rest-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d74aaf7f625c6922e2e1b4f20c19e50a39b68ac
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/jobmanager-rest-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager-rest
+spec:
+  type: NodePort
+  ports:
+    - name: rest
+      port: 8081
+      targetPort: 8081
+      nodePort: 30081
+  selector:
+    app: flink
+    component: jobmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc3-flink/jobmanager-service.yaml b/theodolite-benchmarks/definitions/uc3-flink/jobmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e2ff5d9898eb1ebf5db9a827472a47514ab1473c
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/jobmanager-service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: rpc
+      port: 6123
+    - name: blob-server
+      port: 6124
+    - name: webui
+      port: 8081
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: jobmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc3-flink/service-monitor.yaml b/theodolite-benchmarks/definitions/uc3-flink/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..02f78823c627e27ddfe1db5eac3f6a7f7a7f1bf8
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: flink
+    appScope: titan-ccp
+  name: flink
+spec:
+  selector:
+    matchLabels:
+      app: flink
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-benchmarks/definitions/uc3-flink/taskmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc3-flink/taskmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8f70b7308429f79cfd8f8bda7a7a96e2bc8d8689
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/taskmanager-deployment.yaml
@@ -0,0 +1,87 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-taskmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: taskmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: taskmanager
+    spec:
+      containers:
+        - name: taskmanager
+          image: ghcr.io/cau-se/theodolite-uc3-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: TASK_MANAGER_NUMBER_OF_TASK_SLOTS
+              value: "1" #TODO
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["taskmanager"]
+          ports:
+            - containerPort: 6122
+              name: rpc
+            - containerPort: 6125
+              name: query-state
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6122
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf/
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-taskmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
diff --git a/theodolite-benchmarks/definitions/uc3-flink/taskmanager-service.yaml b/theodolite-benchmarks/definitions/uc3-flink/taskmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a2e27f64af1cfd1a26da142b8a50bb41c8ba5fcb
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-flink/taskmanager-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-taskmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: taskmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc3-kstreams/uc3-benchmark-operator.yaml b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-benchmark-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bfbd7191c5f4a315db29100bcc05341f88cffec2
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-benchmark-operator.yaml
@@ -0,0 +1,41 @@
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: uc3-kstreams
+spec:
+  appResource:
+    - "uc3-kstreams-deployment.yaml"
+    - "uc3-kstreams-service.yaml"
+    - "uc3-jmx-configmap.yaml"
+    - "uc3-service-monitor.yaml"
+  loadGenResource:
+    - "uc3-load-generator-deployment.yaml"
+    - "uc3-load-generator-service.yaml"
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "uc3-kstreams-deployment.yaml"
+  loadTypes:
+    - typeName: "NumSensors"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc3-load-generator-deployment.yaml"
+          properties:
+            container: "workload-generator"
+            variableName: "NUM_SENSORS"
+        - type: NumSensorsLoadGeneratorReplicaPatcher
+          resource: "uc3-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "15000"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "output"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "theodolite-.*"
+        removeOnly: True
diff --git a/theodolite-benchmarks/definitions/uc3-kstreams/uc3-benchmark-standalone.yaml b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-benchmark-standalone.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e00c1672c4a5a02128c2618b525573a4cddd6c72
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-benchmark-standalone.yaml
@@ -0,0 +1,37 @@
+name: "uc3-kstreams"
+appResource:
+  - "uc3-kstreams-deployment.yaml"
+  - "uc3-kstreams-service.yaml"
+  - "uc3-jmx-configmap.yaml"
+  - "uc3-service-monitor.yaml"
+loadGenResource:
+  - "uc3-load-generator-deployment.yaml"
+  - "uc3-load-generator-service.yaml"
+resourceTypes:
+  - typeName: "Instances"
+    patchers:
+      - type: "ReplicaPatcher"
+        resource: "uc3-kstreams-deployment.yaml"
+loadTypes:
+  - typeName: "NumSensors"
+    patchers:
+      - type: "EnvVarPatcher"
+        resource: "uc3-load-generator-deployment.yaml"
+        properties:
+          container: "workload-generator"
+          variableName: "NUM_SENSORS"
+      - type: NumSensorsLoadGeneratorReplicaPatcher
+        resource: "uc3-load-generator-deployment.yaml"
+        properties:
+          loadGenMaxRecords: "15000"
+kafkaConfig:
+  bootstrapServer: "theodolite-cp-kafka:9092"
+  topics:
+    - name: "input"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "output"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "theodolite-.*"
+      removeOnly: True
diff --git a/theodolite-benchmarks/definitions/uc3-kstreams/uc3-jmx-configmap.yaml b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..78496a86b1242a89b9e844ead3e700fd0b9a9667
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-jmx-configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aggregation-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false
diff --git a/theodolite-benchmarks/definitions/uc3-kstreams/uc3-kstreams-deployment.yaml b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-kstreams-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e3f63fae9e245e6116e0fe451480d9bc74b36433
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-kstreams-deployment.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc3-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc3-kstreams/uc3-kstreams-service.yaml b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-kstreams-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85432d04f225c30469f3232153ef6bd72bd02bdf
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-kstreams-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:  
+  name: titan-ccp-aggregation
+  labels:
+    app: titan-ccp-aggregation
+spec:
+  #type: NodePort
+  selector:    
+    app: titan-ccp-aggregation
+  ports:  
+  - name: http
+    port: 80
+    targetPort: 80
+    protocol: TCP
+  - name: metrics
+    port: 5556
diff --git a/theodolite-benchmarks/definitions/uc3-kstreams/uc3-load-generator-deployment.yaml b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-load-generator-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c1cad0b70fd82a5bbb43792ee79f9cf5cc71d95f
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-load-generator-deployment.yaml
@@ -0,0 +1,32 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-load-generator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: workload-generator
+          image: ghcr.io/cau-se/theodolite-uc3-workload-generator:latest
+          ports:
+            - containerPort: 5701
+              name: coordination
+          env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: KUBERNETES_DNS_NAME
+              value: "titan-ccp-load-generator.$(KUBERNETES_NAMESPACE).svc.cluster.local"
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
diff --git a/theodolite-benchmarks/definitions/uc3-kstreams/uc3-load-generator-service.yaml b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-load-generator-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f8b26b3f6dece427f9c1ad4db94e351b042749b3
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-load-generator-service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-load-generator
+  labels:
+    app: titan-ccp-load-generator
+spec:
+  type: ClusterIP
+  clusterIP: None
+  selector:
+    app: titan-ccp-load-generator
+  ports:
+    - name: coordination
+      port: 5701
+      targetPort: 5701
+      protocol: TCP
diff --git a/theodolite-benchmarks/definitions/uc3-kstreams/uc3-service-monitor.yaml b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e7e758cacb5086305efa26292ddef2afc958096
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc3-kstreams/uc3-service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: titan-ccp-aggregation
+    appScope: titan-ccp
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-benchmarks/definitions/uc4-flink/flink-configuration-configmap.yaml b/theodolite-benchmarks/definitions/uc4-flink/flink-configuration-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..321541f6ac8715b8546b964d8ad2b7c28552fbcd
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/flink-configuration-configmap.yaml
@@ -0,0 +1,66 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: flink-config
+  labels:
+    app: flink
+data:
+  flink-conf.yaml: |+
+    #jobmanager.rpc.address: flink-jobmanager
+    #taskmanager.numberOfTaskSlots: 1 #TODO
+    #blob.server.port: 6124
+    #jobmanager.rpc.port: 6123
+    #taskmanager.rpc.port: 6122
+    #queryable-state.proxy.ports: 6125
+    #jobmanager.memory.process.size: 4Gb
+    #taskmanager.memory.process.size: 4Gb
+    #parallelism.default: 1 #TODO
+    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
+    metrics.reporter.prom.interval: 10 SECONDS
+    taskmanager.network.detailed-metrics: true
+  # -> gives metrics about inbound/outbound network queue lengths
+  log4j-console.properties: |+
+    # This affects logging for both user code and Flink
+    rootLogger.level = INFO
+    rootLogger.appenderRef.console.ref = ConsoleAppender
+    rootLogger.appenderRef.rolling.ref = RollingFileAppender
+
+    # Uncomment this if you want to _only_ change Flink's logging
+    #logger.flink.name = org.apache.flink
+    #logger.flink.level = INFO
+
+    # The following lines keep the log level of common libraries/connectors on
+    # log level INFO. The root logger does not override this. You have to manually
+    # change the log levels here.
+    logger.akka.name = akka
+    logger.akka.level = INFO
+    logger.kafka.name= org.apache.kafka
+    logger.kafka.level = INFO
+    logger.hadoop.name = org.apache.hadoop
+    logger.hadoop.level = INFO
+    logger.zookeeper.name = org.apache.zookeeper
+    logger.zookeeper.level = INFO
+
+    # Log all infos to the console
+    appender.console.name = ConsoleAppender
+    appender.console.type = CONSOLE
+    appender.console.layout.type = PatternLayout
+    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+
+    # Log all infos in the given rolling file
+    appender.rolling.name = RollingFileAppender
+    appender.rolling.type = RollingFile
+    appender.rolling.append = false
+    appender.rolling.fileName = ${sys:log.file}
+    appender.rolling.filePattern = ${sys:log.file}.%i
+    appender.rolling.layout.type = PatternLayout
+    appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
+    appender.rolling.policies.type = Policies
+    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
+    appender.rolling.policies.size.size=100MB
+    appender.rolling.strategy.type = DefaultRolloverStrategy
+    appender.rolling.strategy.max = 10
+
+    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
+    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
+    logger.netty.level = OFF
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc4-flink/jobmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc4-flink/jobmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b6533a2c4355e227a16aeface2080253bce19958
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/jobmanager-deployment.yaml
@@ -0,0 +1,93 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-jobmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: jobmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: jobmanager
+    spec:
+      containers:
+        - name: jobmanager
+          image: ghcr.io/cau-se/theodolite-uc4-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["standalone-job", "--job-classname", "theodolite.uc4.application.AggregationServiceFlinkJob"] # optional arguments: ["--job-id", "<job id>", "--fromSavepoint", "/path/to/savepoint", "--allowNonRestoredState"]
+          #command: ['sleep', '60m']
+          ports:
+            - containerPort: 6123
+              name: rpc
+            - containerPort: 6124
+              name: blob-server
+            - containerPort: 8081
+              name: webui
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6123
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf
+#            - name: job-artifacts-volume
+#              mountPath: /opt/flink/usrlib
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-jobmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
+#        - name: job-artifacts-volume
+#          hostPath:
+#            path: /host/path/to/job/artifacts
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc4-flink/jobmanager-rest-service.yaml b/theodolite-benchmarks/definitions/uc4-flink/jobmanager-rest-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d74aaf7f625c6922e2e1b4f20c19e50a39b68ac
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/jobmanager-rest-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager-rest
+spec:
+  type: NodePort
+  ports:
+    - name: rest
+      port: 8081
+      targetPort: 8081
+      nodePort: 30081
+  selector:
+    app: flink
+    component: jobmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc4-flink/jobmanager-service.yaml b/theodolite-benchmarks/definitions/uc4-flink/jobmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e2ff5d9898eb1ebf5db9a827472a47514ab1473c
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/jobmanager-service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-jobmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: rpc
+      port: 6123
+    - name: blob-server
+      port: 6124
+    - name: webui
+      port: 8081
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: jobmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc4-flink/service-monitor.yaml b/theodolite-benchmarks/definitions/uc4-flink/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..02f78823c627e27ddfe1db5eac3f6a7f7a7f1bf8
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: flink
+    appScope: titan-ccp
+  name: flink
+spec:
+  selector:
+    matchLabels:
+        app: flink
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-benchmarks/definitions/uc4-flink/taskmanager-deployment.yaml b/theodolite-benchmarks/definitions/uc4-flink/taskmanager-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7363b013b21ad29b481e449113ccf31538505634
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/taskmanager-deployment.yaml
@@ -0,0 +1,87 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: flink-taskmanager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: flink
+      component: taskmanager
+  template:
+    metadata:
+      labels:
+        app: flink
+        component: taskmanager
+    spec:
+      containers:
+        - name: taskmanager
+          image: ghcr.io/cau-se/theodolite-uc4-flink:latest
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: COMMIT_INTERVAL_MS
+              value: "100"
+            - name: CHECKPOINTING
+              value: "false"
+            - name: PARALLELISM
+              value: "1"
+            - name: "FLINK_STATE_BACKEND"
+              value: "rocksdb"
+            - name: JOB_MANAGER_RPC_ADDRESS
+              value: "flink-jobmanager"
+            - name: TASK_MANAGER_NUMBER_OF_TASK_SLOTS
+              value: "1" #TODO
+            - name: FLINK_PROPERTIES
+              value: |+
+                blob.server.port: 6124
+                jobmanager.rpc.port: 6123
+                taskmanager.rpc.port: 6122
+                queryable-state.proxy.ports: 6125
+                jobmanager.memory.process.size: 4Gb
+                taskmanager.memory.process.size: 4Gb
+                #parallelism.default: 1 #TODO
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+          args: ["taskmanager"]
+          ports:
+            - containerPort: 6122
+              name: rpc
+            - containerPort: 6125
+              name: query-state
+            - containerPort: 9249
+              name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6122
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume-rw
+              mountPath: /opt/flink/conf/
+          securityContext:
+            runAsUser: 9999  # refers to user _flink_ from official flink image, change if necessary
+      initContainers:
+        - name: init-taskmanager
+          image: busybox:1.28
+          command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /flink-config/
+            - name: flink-config-volume-rw
+              mountPath: /flink-config-rw/
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
+        - name: flink-config-volume-rw
+          emptyDir: {}
diff --git a/theodolite-benchmarks/definitions/uc4-flink/taskmanager-service.yaml b/theodolite-benchmarks/definitions/uc4-flink/taskmanager-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a2e27f64af1cfd1a26da142b8a50bb41c8ba5fcb
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-flink/taskmanager-service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: flink-taskmanager
+  labels:
+    app: flink
+spec:
+  type: ClusterIP
+  ports:
+    - name: metrics
+      port: 9249
+  selector:
+    app: flink
+    component: taskmanager
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc4-kstreams/uc4-benchmark-operator.yaml b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-benchmark-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0ed48c9afd0d8d02493f7afc2df3e440d0ffabdd
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-benchmark-operator.yaml
@@ -0,0 +1,48 @@
+apiVersion: theodolite.com/v1
+kind: benchmark
+spec:
+  metadata:
+    name: uc4-kstreams
+  appResource:
+    - "uc4-kstreams-deployment.yaml"
+    - "uc4-kstreams-service.yaml"
+    - "uc4-jmx-configmap.yaml"
+    - "uc4-service-monitor.yaml"
+  loadGenResource:
+    - "uc4-load-generator-deployment.yaml"
+    - "uc4-load-generator-service.yaml"
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "uc4-kstreams-deployment.yaml"
+  loadTypes:
+    - typeName: "NumNestedGroups"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc4-load-generator-deployment.yaml"
+          properties:
+            container: "workload-generator"
+            variableName: "NUM_SENSORS"
+        - type: NumNestedGroupsLoadGeneratorReplicaPatcher
+          resource: "uc4-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "15000"
+            numSensors: "4.0"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "output"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "configuration"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "aggregation-feedback"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "theodolite-.*"
+        removeOnly: True
diff --git a/theodolite-benchmarks/definitions/uc4-kstreams/uc4-benchmark-standalone.yaml b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-benchmark-standalone.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..96e72c9b6d726267044464cce6deb32f60442e96
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-benchmark-standalone.yaml
@@ -0,0 +1,44 @@
+name: "uc4-kstreams"
+appResource:
+  - "uc4-kstreams-deployment.yaml"
+  - "uc4-kstreams-service.yaml"
+  - "uc4-jmx-configmap.yaml"
+  - "uc4-service-monitor.yaml"
+loadGenResource:
+  - "uc4-load-generator-deployment.yaml"
+  - "uc4-load-generator-service.yaml"
+resourceTypes:
+  - typeName: "Instances"
+    patchers:
+      - type: "ReplicaPatcher"
+        resource: "uc4-kstreams-deployment.yaml"
+loadTypes:
+  - typeName: "NumNestedGroups"
+    patchers:
+      - type: "EnvVarPatcher"
+        resource: "uc4-load-generator-deployment.yaml"
+        properties:
+          container: "workload-generator"
+          variableName: "NUM_NESTED_GROUPS"
+      - type: "NumNestedGroupsLoadGeneratorReplicaPatcher"
+        resource: "uc4-load-generator-deployment.yaml"
+        properties:
+          loadGenMaxRecords: "15000"
+          numSensors: "4.0"
+kafkaConfig:
+  bootstrapServer: "theodolite-cp-kafka:9092"
+  topics:
+    - name: "input"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "output"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "configuration"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "aggregation-feedback"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "theodolite-.*"
+      removeOnly: True
diff --git a/theodolite-benchmarks/definitions/uc4-kstreams/uc4-jmx-configmap.yaml b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..78496a86b1242a89b9e844ead3e700fd0b9a9667
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-jmx-configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aggregation-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false
diff --git a/theodolite-benchmarks/definitions/uc4-kstreams/uc4-kstreams-deployment.yaml b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-kstreams-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..20e0872d262df46b5c213d9d529983f5f4155735
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-kstreams-deployment.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc4-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/theodolite-benchmarks/definitions/uc4-kstreams/uc4-kstreams-service.yaml b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-kstreams-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85432d04f225c30469f3232153ef6bd72bd02bdf
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-kstreams-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:  
+  name: titan-ccp-aggregation
+  labels:
+    app: titan-ccp-aggregation
+spec:
+  #type: NodePort
+  selector:    
+    app: titan-ccp-aggregation
+  ports:  
+  - name: http
+    port: 80
+    targetPort: 80
+    protocol: TCP
+  - name: metrics
+    port: 5556
diff --git a/theodolite-benchmarks/definitions/uc4-kstreams/uc4-load-generator-deployment.yaml b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-load-generator-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7a69d13daae57b06c77f316da9aa953b21ac096b
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-load-generator-deployment.yaml
@@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-load-generator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: workload-generator
+          image: ghcr.io/cau-se/theodolite-uc4-workload-generator:latest
+          ports:
+            - containerPort: 5701
+              name: coordination
+          env:
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: KUBERNETES_DNS_NAME
+              value: "titan-ccp-load-generator.$(KUBERNETES_NAMESPACE).svc.cluster.local"
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: NUM_NESTED_GROUPS
+              value: "5"
diff --git a/theodolite-benchmarks/definitions/uc4-kstreams/uc4-load-generator-service.yaml b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-load-generator-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f8b26b3f6dece427f9c1ad4db94e351b042749b3
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-load-generator-service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-load-generator
+  labels:
+    app: titan-ccp-load-generator
+spec:
+  type: ClusterIP
+  clusterIP: None
+  selector:
+    app: titan-ccp-load-generator
+  ports:
+    - name: coordination
+      port: 5701
+      targetPort: 5701
+      protocol: TCP
diff --git a/theodolite-benchmarks/definitions/uc4-kstreams/uc4-service-monitor.yaml b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e7e758cacb5086305efa26292ddef2afc958096
--- /dev/null
+++ b/theodolite-benchmarks/definitions/uc4-kstreams/uc4-service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: titan-ccp-aggregation
+    appScope: titan-ccp
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+        app: titan-ccp-aggregation
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-benchmarks/uc1-flink/src/main/java/theodolite/uc1/application/HistoryServiceFlinkJob.java b/theodolite-benchmarks/uc1-flink/src/main/java/theodolite/uc1/application/HistoryServiceFlinkJob.java
index 8d9832e40253fe9e3178bfc25047ed2b376abe76..0cb132e526486e71409736b843dd25bdfa52da4a 100644
--- a/theodolite-benchmarks/uc1-flink/src/main/java/theodolite/uc1/application/HistoryServiceFlinkJob.java
+++ b/theodolite-benchmarks/uc1-flink/src/main/java/theodolite/uc1/application/HistoryServiceFlinkJob.java
@@ -47,7 +47,7 @@ public final class HistoryServiceFlinkJob {
     // Parallelism
     final Integer parallelism = this.config.getInteger(ConfigurationKeys.PARALLELISM, null);
     if (parallelism != null) {
-      LOGGER.error("Set parallelism: {}.", parallelism);
+      LOGGER.info("Set parallelism: {}.", parallelism);
       this.env.setParallelism(parallelism);
     }
 
@@ -68,7 +68,7 @@ public final class HistoryServiceFlinkJob {
     final DataStream<ActivePowerRecord> stream = this.env.addSource(kafkaConsumer);
 
     stream
-        .rebalance()
+        // .rebalance()
         .map(new GsonMapper())
         .flatMap((record, c) -> LOGGER.info("Record: {}", record))
         .returns(Types.GENERIC(Object.class)); // Will never be used
diff --git a/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/HistoryServiceFlinkJob.java b/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/HistoryServiceFlinkJob.java
index 1068267086892c4538001b6afc670b3b0cd043ef..d156d895d86bb01a31f96e08764df8b8df743c4d 100644
--- a/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/HistoryServiceFlinkJob.java
+++ b/theodolite-benchmarks/uc2-flink/src/main/java/theodolite/uc2/application/HistoryServiceFlinkJob.java
@@ -59,7 +59,7 @@ public final class HistoryServiceFlinkJob {
     // Parallelism
     final Integer parallelism = this.config.getInteger(ConfigurationKeys.PARALLELISM, null);
     if (parallelism != null) {
-      LOGGER.error("Set parallelism: {}.", parallelism);
+      LOGGER.info("Set parallelism: {}.", parallelism);
       this.env.setParallelism(parallelism);
     }
 
@@ -83,7 +83,9 @@ public final class HistoryServiceFlinkJob {
     final String schemaRegistryUrl = this.config.getString(ConfigurationKeys.SCHEMA_REGISTRY_URL);
     final String inputTopic = this.config.getString(ConfigurationKeys.KAFKA_INPUT_TOPIC);
     final String outputTopic = this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC);
-    final int windowDuration = this.config.getInt(ConfigurationKeys.KAFKA_WINDOW_DURATION_MINUTES);
+    final int windowDurationMinutes =
+        this.config.getInt(ConfigurationKeys.KAFKA_WINDOW_DURATION_MINUTES);
+    final Time windowDuration = Time.minutes(windowDurationMinutes);
     final boolean checkpointing = this.config.getBoolean(ConfigurationKeys.CHECKPOINTING, true);
 
     final KafkaConnectorFactory kafkaConnector = new KafkaConnectorFactory(
@@ -100,9 +102,9 @@ public final class HistoryServiceFlinkJob {
 
     this.env
         .addSource(kafkaSource).name("[Kafka Consumer] Topic: " + inputTopic)
-        .rebalance()
+        // .rebalance()
         .keyBy(ActivePowerRecord::getIdentifier)
-        .window(TumblingEventTimeWindows.of(Time.minutes(windowDuration)))
+        .window(TumblingEventTimeWindows.of(windowDuration))
         .aggregate(new StatsAggregateFunction(), new StatsProcessWindowFunction())
         .map(t -> {
           final String key = t.f0;
diff --git a/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/HistoryServiceFlinkJob.java b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/HistoryServiceFlinkJob.java
index d69ee47d8c831f2e5e74abdd8c33393c8ee6e07e..091b25674a2a31671ca68bd2076c694da9533d77 100644
--- a/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/HistoryServiceFlinkJob.java
+++ b/theodolite-benchmarks/uc3-flink/src/main/java/theodolite/uc3/application/HistoryServiceFlinkJob.java
@@ -117,9 +117,8 @@ public final class HistoryServiceFlinkJob {
     // Streaming topology
     final StatsKeyFactory<HourOfDayKey> keyFactory = new HourOfDayKeyFactory();
     this.env
-        .addSource(kafkaSource)
-        .name("[Kafka Consumer] Topic: " + inputTopic)
-        .rebalance()
+        .addSource(kafkaSource).name("[Kafka Consumer] Topic: " + inputTopic)
+        // .rebalance()
         .keyBy((KeySelector<ActivePowerRecord, HourOfDayKey>) record -> {
           final Instant instant = Instant.ofEpochMilli(record.getTimestamp());
           final LocalDateTime dateTime = LocalDateTime.ofInstant(instant, timeZone);
diff --git a/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/AggregationServiceFlinkJob.java b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/AggregationServiceFlinkJob.java
index 45c7ff1ad1faeec6357e4ac3871dec7a51306698..3e2878a893057024de00333492462f5029eb6d77 100644
--- a/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/AggregationServiceFlinkJob.java
+++ b/theodolite-benchmarks/uc4-flink/src/main/java/theodolite/uc4/application/AggregationServiceFlinkJob.java
@@ -79,7 +79,7 @@ public final class AggregationServiceFlinkJob {
     // Parallelism
     final Integer parallelism = this.config.getInteger(ConfigurationKeys.PARALLELISM, null);
     if (parallelism != null) {
-      LOGGER.error("Set parallelism: {}.", parallelism);
+      LOGGER.info("Set parallelism: {}.", parallelism);
       this.env.setParallelism(parallelism);
     }
 
@@ -152,7 +152,7 @@ public final class AggregationServiceFlinkJob {
     // Build input stream
     final DataStream<ActivePowerRecord> inputStream = this.env.addSource(kafkaInputSource)
         .name("[Kafka Consumer] Topic: " + inputTopic)// NOCS
-        .rebalance()
+        // .rebalance()
         .map(r -> r)
         .name("[Map] Rebalance Forward");
 
@@ -160,7 +160,7 @@ public final class AggregationServiceFlinkJob {
     final DataStream<ActivePowerRecord> aggregationsInputStream =
         this.env.addSource(kafkaOutputSource)
             .name("[Kafka Consumer] Topic: " + outputTopic) // NOCS
-            .rebalance()
+            // .rebalance()
             .map(r -> new ActivePowerRecord(r.getIdentifier(), r.getTimestamp(), r.getSumInW()))
             .name("[Map] AggregatedActivePowerRecord -> ActivePowerRecord");
 
diff --git a/theodolite-quarkus/.dockerignore b/theodolite-quarkus/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..d95caadc42523460fa9d78cf17629c8ee231acc9
--- /dev/null
+++ b/theodolite-quarkus/.dockerignore
@@ -0,0 +1,6 @@
+*
+!build/*-runner
+!build/*-runner.jar
+!build/lib/*
+!build/quarkus-app/*
+!config/*
\ No newline at end of file
diff --git a/theodolite-quarkus/.gitignore b/theodolite-quarkus/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..a1eff0e1d4dddacdbcafa2c235b28616cb53e7bf
--- /dev/null
+++ b/theodolite-quarkus/.gitignore
@@ -0,0 +1,33 @@
+# Gradle
+.gradle/
+build/
+
+# Eclipse
+.project
+.classpath
+.settings/
+bin/
+
+# IntelliJ
+.idea
+*.ipr
+*.iml
+*.iws
+
+# NetBeans
+nb-configuration.xml
+
+# Visual Studio Code
+.vscode
+.factorypath
+
+# OSX
+.DS_Store
+
+# Vim
+*.swp
+*.swo
+
+# patch
+*.orig
+*.rej
diff --git a/theodolite-quarkus/README.md b/theodolite-quarkus/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..fc1d1bfe4a9c20a515cf6e69208657f74694d80e
--- /dev/null
+++ b/theodolite-quarkus/README.md
@@ -0,0 +1,134 @@
+# Theodolite-quarkus project
+
+This project uses Quarkus, the Supersonic Subatomic Java Framework.
+
+If you want to learn more about Quarkus, please visit its website: https://quarkus.io/ .
+
+## Running the application in dev mode
+
+You can run your application in dev mode using:
+
+```shell script
+./gradlew quarkusDev
+```
+
+## Packaging and running the application
+
+The application can be packaged using:
+
+```shell script
+./gradlew build
+```
+
+It produces the `theodolite-quarkus-1.0.0-SNAPSHOT-runner.jar` file in the `/build` directory. Be aware that it’s not
+an _über-jar_ as the dependencies are copied into the `build/lib` directory.
+
+If you want to build an _über-jar_, execute the following command:
+
+```shell script
+./gradlew build -Dquarkus.package.type=uber-jar
+```
+
+The application is now runnable using `java -jar build/theodolite-quarkus-1.0.0-SNAPSHOT-runner.jar`.
+
+## Creating a native executable
+
+You can create a native executable using:
+
+```shell script
+./gradlew build -Dquarkus.package.type=native
+```
+
+Or, if you don't have GraalVM installed, you can run the native executable build in a container using:
+
+```shell script
+./gradlew build -Dquarkus.package.type=native -Dquarkus.native.container-build=true
+```
+
+You can then execute your native executable with:
+```./build/theodolite-quarkus-1.0.0-SNAPSHOT-runner```
+
+If you want to learn more about building native executables, please consult https://quarkus.io/guides/gradle-tooling.
+
+## Build docker images
+
+For the JVM version use:
+
+```shell script
+./gradlew build
+docker build -f src/main/docker/Dockerfile.jvm -t theodolite-quarkus-jvm .
+```
+
+For the native image version use:
+
+```shell script
+./gradlew build -Dquarkus.package.type=native
+docker build -f src/main/docker/Dockerfile.native -t theodolite-quarkus-native .
+```
+
+## Execute docker images
+
+Remember to set the environment variables first.
+
+JVM version:
+
+```shell script
+docker run -i --rm theodolite-quarkus-jvm
+```
+
+Native image version:
+
+```shell script
+docker run -i --rm theodolite-quarkus-native
+```
+
+## Environment variables
+
+**Production:** (Docker-Container)
+
+| Variables name               | Default value                      |Usage         |
+| -----------------------------|:----------------------------------:| ------------:|
+| `NAMESPACE`                  | `default`                          |Determines the namespace in which Theodolite will be executed. Used in the KubernetesBenchmark|
+| `THEODOLITE_EXECUTION`       |  `./config/BenchmarkExecution.yaml`|The complete path to the benchmarkExecution file. Used in the TheodoliteYamlExecutor. |
+| `THEODOLITE_BENCHMARK_TYPE`  |  `./config/BenchmarkType.yaml`     |The complete path to the benchmarkType file. Used in the TheodoliteYamlExecutor.|
+| `THEODOLITE_APP_RESOURCES`   |  `./config`                        |The path under which the YAML resources for the sub-experiments are found. Used in the KubernetesBenchmark|
+| `MODE`                       | `yaml-executor`                    |  Defines the mode of operation: either `yaml-executor` or `operator`|
+
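+For example, the production defaults above can be overridden when starting the container (a hypothetical invocation; the image name `theodolite-quarkus-jvm` comes from the build section above, and the referenced config files must be available inside the container, e.g. baked into the image or mounted):
+
+```shell script
+docker run -i --rm \
+  -e NAMESPACE=default \
+  -e THEODOLITE_EXECUTION=./config/BenchmarkExecution.yaml \
+  -e THEODOLITE_BENCHMARK_TYPE=./config/BenchmarkType.yaml \
+  -e THEODOLITE_APP_RESOURCES=./config \
+  -e MODE=yaml-executor \
+  theodolite-quarkus-jvm
+```
+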
+**Development:** (local via IntelliJ)
+
+When running Theodolite from within IntelliJ via
+[Run Configurations](https://www.jetbrains.com/help/idea/work-with-gradle-tasks.html#gradle_run_config), set the
+*Environment variables* field to:
+
+```
+NAMESPACE=default;THEODOLITE_BENCHMARK=./../../../../config/BenchmarkType.yaml;THEODOLITE_APP_RESOURCES=./../../../../config;THEODOLITE_EXECUTION=./../../../../config/BenchmarkExecution.yaml;MODE=operator
+```
+
+Alternative:
+
+``` sh
+export NAMESPACE=default
+export THEODOLITE_BENCHMARK=./../../../../config/BenchmarkType.yaml
+export THEODOLITE_APP_RESOURCES=./../../../../config
+export THEODOLITE_EXECUTION=./../../../../config/BenchmarkExecution.yaml
+export MODE=operator
+./gradlew quarkusDev
+
+```
+
+#### Install the Detekt code analysis plugin
+
+Install https://plugins.jetbrains.com/plugin/10761-detekt
+
+- Install the plugin
+- Navigate to Settings/Preferences -> Tools -> Detekt
+- Check *Enable Detekt*
+- Specify your Detekt configuration and baseline file (optional)
+
+Detekt issues will then be annotated on-the-fly while coding.
+
+**Ignore failures in the build:** add
+
+```ignoreFailures = true```
+
+to the detekt task in build.gradle
diff --git a/theodolite-quarkus/build.gradle b/theodolite-quarkus/build.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..3082deaf12fc48c6aca97ffd00b9c74cd7e6c143
--- /dev/null
+++ b/theodolite-quarkus/build.gradle
@@ -0,0 +1,68 @@
+plugins {
+    id 'org.jetbrains.kotlin.jvm' version "1.3.72"
+    id "org.jetbrains.kotlin.plugin.allopen" version "1.3.72"
+    id 'io.quarkus'
+    id "io.gitlab.arturbosch.detekt" version "1.15.0"   //For code style
+    id "org.jlleitschuh.gradle.ktlint" version "10.0.0" // same as above
+}
+
+repositories {
+    mavenLocal()
+    mavenCentral()
+    jcenter()
+}
+
+dependencies {
+    implementation enforcedPlatform("${quarkusPlatformGroupId}:${quarkusPlatformArtifactId}:${quarkusPlatformVersion}")
+    implementation 'io.quarkus:quarkus-kotlin'
+    implementation 'org.jetbrains.kotlin:kotlin-stdlib-jdk8'
+    implementation 'io.quarkus:quarkus-arc'
+    implementation 'io.quarkus:quarkus-resteasy'
+    implementation 'com.google.code.gson:gson:2.8.5'
+    implementation 'org.slf4j:slf4j-simple:1.7.29'
+    implementation 'io.github.microutils:kotlin-logging:1.12.0'
+    implementation('io.fabric8:kubernetes-client:5.4.1'){force = true}
+    implementation('io.fabric8:kubernetes-model-core:5.4.1'){force = true}
+    implementation('io.fabric8:kubernetes-model-common:5.4.1'){force = true}
+    implementation 'org.apache.kafka:kafka-clients:2.7.0'
+    implementation 'khttp:khttp:1.0.0'
+
+    compile 'junit:junit:4.12'
+
+    testImplementation 'io.quarkus:quarkus-junit5'
+    testImplementation 'io.rest-assured:rest-assured'
+    testImplementation 'org.junit-pioneer:junit-pioneer:1.4.0'
+    testImplementation ('io.fabric8:kubernetes-server-mock:5.4.1'){force = true}
+}
+
+group 'theodolite'
+version '0.5.0-SNAPSHOT'
+
+java {
+    sourceCompatibility = JavaVersion.VERSION_11
+    targetCompatibility = JavaVersion.VERSION_11
+}
+
+allOpen {
+    annotation("javax.ws.rs.Path")
+    annotation("javax.enterprise.context.ApplicationScoped")
+    annotation("io.quarkus.test.junit.QuarkusTest")
+}
+
+compileKotlin {
+    kotlinOptions.jvmTarget = JavaVersion.VERSION_11
+    kotlinOptions.javaParameters = true
+}
+
+compileTestKotlin {
+    kotlinOptions.jvmTarget = JavaVersion.VERSION_11
+}
+detekt {
+    failFast = true // fail build on any finding
+    buildUponDefaultConfig = true
+    ignoreFailures = true
+}
+
+ktlint {
+    ignoreFailures = true
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/build_jvm.sh b/theodolite-quarkus/build_jvm.sh
new file mode 100755
index 0000000000000000000000000000000000000000..95e2e44427a894a0513e6358b439d23e3eea834b
--- /dev/null
+++ b/theodolite-quarkus/build_jvm.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+./gradlew build -x test
+
+docker build -f src/main/docker/Dockerfile.jvm -t quarkus/theodolite-quarkus-jvm .
+
+docker run -i --rm -p 8080:8080 quarkus/theodolite-quarkus-jvm
diff --git a/theodolite-quarkus/build_native.sh b/theodolite-quarkus/build_native.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1effa3268ce5b863a680c6f4bdc6b4b632b2d4c6
--- /dev/null
+++ b/theodolite-quarkus/build_native.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+./gradlew build -Dquarkus.package.type=native -x test
+
+docker build -f src/main/docker/Dockerfile.native -t quarkus/theodolite-quarkus .
+
+docker run -i --rm -p 8080:8080 quarkus/theodolite-quarkus
diff --git a/theodolite-quarkus/config/README.md b/theodolite-quarkus/config/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..23337d77375ebba8f624e7a11f714502fe3d5e67
--- /dev/null
+++ b/theodolite-quarkus/config/README.md
@@ -0,0 +1,201 @@
+## The Benchmark Object
+
+The *benchmark* object defines all static components of an execution of a benchmark with Theodolite.
+An example of a benchmark object is given in [example-benchmark-yaml-resource](example-benchmark-yaml-resource.yaml).
+
+
+A **Benchmark** is a [*standard tool for the competitive evaluation and comparison of competing systems or components according to specific characteristics, such as performance, dependability, or security*](https://doi.org/10.1145/2668930.2688819). In Theodolite, we have [specification-based benchmarks](https://doi.org/10.1145/2668930.2688819), or at least something very close to that. That is, our benchmarks are architectural descriptions---in our case---[of typical use cases of stream processing in microservices](https://doi.org/10.1016/j.bdr.2021.100209) (e.g. our UC1). Hence, we don't really have a piece of software, which represents a benchmark. We only have implementations of benchmarks, e.g. an implementation of UC1 with Kafka Streams. For simplification, we call these *benchmark implementations* simply *benchmarks*.
+
+```yaml
+name: String
+appResource:
+  - String
+  ...
+loadGenResource:
+  - String
+  ...
+resourceTypes:
+  - typeName: String
+    patchers:
+      - type: String
+        resources: String
+        properties:
+          <Patcher Arguments> ...
+      ...
+loadTypes:
+  - typeName: String
+  patchers:
+    - type: String
+      resources: String
+      properties:
+        <Patcher Arguments> ...
+    ...
+kafkaConfig:
+  bootstrapServer: String
+  topics:
+    - name: String
+      numPartitions: UnsignedInt
+      replicationFactor: UnsignedInt
+    - name: String
+      removeOnly: bool
+    ...
+```
+
+The properties have the following definitions:
+
+* **name**: The name of the *benchmark*
+* **appResource**: A list of file names that reference Kubernetes resources that are deployed on the cluster for the system under test (SUT).
+* **loadGenResource**: A list of file names that reference Kubernetes resources that are deployed on the cluster for the load generator.
+* **resourceTypes**: A list of resource types that can be scaled for this *benchmark*. For each resource type the concrete values are defined in the *execution* object. Each resource type has the following structure:
+    * **typeName**: Name of the resource type.
+    * **patchers**: List of [patchers](#Patchers) used to scale this resource type. Each patcher has the following structure:
+        * **type**: Type of the [patcher](#Patchers). The concrete types can be looked up in the list of [patchers](#Patchers). 
+        * **resources**: Specifies the Kubernetes resource to be patched.
+        *  **properties**: *Patcher Arguments*: (Optional) Patcher specific additional arguments.
+* **loadTypes**: A list of load types that can be scaled for this *benchmark*. For each load type the concrete values are defined in the *execution* object. Each load type has the following structure:
+    * **typeName**: Name of the load type.
+    * **patchers**: List of [patchers](#Patchers) used to scale this load type. Each patcher has the following structure:
+        * **type**: Type of the Patcher. The concrete types can be looked up in the list of patchers. 
+        * **resources**: Specifies the Kubernetes resource to be patched.
+        * **properties**: *Patcher Arguments*: (Optional) Patcher specific additional arguments as Map<String, String>.
+* **kafkaConfig**: Contains the Kafka configuration.
+    * **bootstrapServer**: The bootstrap server connection string.
+    * **topics**: List of topics to be created for each [experiment](#Experiment). Alternatively, Theodolite offers the possibility to only remove certain topics after each experiment (see **removeOnly**).
+        * **name**: The name of the topic.
+        * **numPartitions**: The number of partitions of the topic.
+        * **replicationFactor**: The replication factor of the topic.
+        * **removeOnly**: Determines whether this topic should only be deleted after each experiment. For removeOnly topics, the name can be a regular expression matching the topics to delete.
+    
+
+## The Execution Object
+
+A benchmark can be executed for different SUTs, by different users, and multiple times. We call such an execution of a benchmark simply an *execution*. The *execution* object defines all concrete values of an execution.
+An example of an execution object is given in [example-execution-yaml-resource](example-execution-yaml-resource.yaml).
+
+
+```yaml
+name: String
+benchmark: String
+load:
+  loadType: String
+  loadValues:
+    - UnsignedInt
+    ...
+resources:
+  resourceType: String
+  resourceValues:
+    - UnsignedInt
+    ...
+slos:
+  - sloType: String
+    threshold: UnsignedInt
+    prometheusUrl: String
+    externalSloUrl: String
+    offset: SignedInt
+    warmup: UnsignedInt
+  ...
+executions:
+  strategy: "LinearSearch" or "BinarySearch"
+  duration: UnsignedInt
+  repetition: UnsignedInt
+  restrictions:
+    - "LowerBound"
+    ...
+configurationOverrides:
+  - patcher:
+      type: String
+      resource: String
+      properties:
+        <Patcher Arguments> ...
+  ...
+```
+
+The properties have the following definitions:
+
+* **name**: The name of the *execution*
+* **benchmark**: The name of the *benchmark* this *execution* is referring to.
+* **load**: Specifies the load values that are benchmarked.
+  * **loadType**: The type of the load. It must match one of the load types specified in the referenced *benchmark*.
+  * **loadValues**: List of load values for the specified load type.
+* **resources**: Specifies the scaling resource that is benchmarked.
+  * **resourceType**: The type of the resource. It must match one of the resource types specified in the referenced *benchmark*.
+  * **resourceValues**: List of resource values for the specified resource type.
+* **slos**: List of the Service Level Objectives (SLOs) for this *execution*. Each SLO has the following fields:
+  * **sloType**: The type of the SLO. It must match 'lag trend'.
+  * **threshold**: The threshold the SUT should meet for a successful experiment.
+  * **prometheusUrl**: Connection string for Prometheus.
+  * **externalSloUrl**: Connection string for an external SLO analysis.
+  * **offset**: Hours by which the start and end timestamp will be shifted (for different timezones).
+  * **warmup**: Seconds of time that are ignored in the analysis.
+* **executions**: Defines the overall parameters of the execution.
+  * **strategy**: Defines the search strategy used for the execution: either 'LinearSearch' or 'BinarySearch'.
+  * **duration**: Defines the duration of each [experiment](#Experiment) in seconds.
+  * **repetition**: Unused.
+  * **restrictions**: List of restriction strategies used to delimit the search space.
+    * **LowerBound**: Currently the only supported *restriction strategy*.
+* **configurationOverrides**: List of patchers that are used to override existing configurations.
+  * **patcher**: Patcher used to patch a resource. Each patcher has the following structure:
+    * **type**: Type of the patcher. The concrete types can be looked up in the list of [patchers](#Patchers).
+    * **resource**: Specifies the Kubernetes resource to be patched.
+    * **properties**: *Patcher Arguments*: (Optional) Patcher-specific additional arguments.
+
+## Patchers
+
+* **ReplicaPatcher**: Allows to modify the number of Replicas for a kubernetes deployment.
+  * **type**: "ReplicaPatcher"
+  * **resource**: "uc1-kstreams-deployment.yaml"
+
+* **NumSensorsLoadGeneratorReplicaPatcher**: Allows to scale the number of load generators. Scales according to the following formula: (value + 15_000 - 1) / 15_000 (a worked example follows the patcher list).
+  * **type**: "NumSensorsLoadGeneratorReplicaPatcher"
+  * **resource**: "uc1-load-generator-deployment.yaml"
+
+* **NumNestedGroupsLoadGeneratorReplicaPatcher**: Allows to scale the number of load generators. Scales according to the following formula: (4^(value) + 15_000 - 1) / 15_000 (a worked example follows the patcher list).
+  * **type**: "NumNestedGroupsLoadGeneratorReplicaPatcher"
+  * **resource**: "uc1-load-generator-deployment.yaml"
+
+* **EnvVarPatcher**: Allows to modify the value of an environment variable for a container in a kubernetes deployment. 
+  * **type**: "EnvVarPatcher"
+  * **resource**: "uc1-load-generator-deployment.yaml"
+  * **properties**:
+    * container: "workload-generator"
+    * variableName: "NUM_SENSORS"
+
+* **NodeSelectorPatcher**: Changes the node selection field in kubernetes resources.
+  * **type**: "NodeSelectorPatcher"
+  * **resource**: "uc1-load-generator-deployment.yaml"
+  * **properties**:
+    * variableName: "env"
+  * **value**: "prod"
+
+* **ResourceLimitPatcher**: Changes the resource limit for a Kubernetes resource.
+  * **type**: "ResourceLimitPatcher"
+  * **resource**: "uc1-kstreams-deployment.yaml"
+  * **properties**:
+    * container: "uc-application"
+    * limitedResource: "cpu" or "memory"
+  * **value**: "1000m" or "2Gi"
+  
+* **SchedulerNamePatcher**: Changes the scheduler for Kubernetes resources.
+  * **type**: "SchedulerNamePatcher"
+  * **resource**: "uc1-kstreams-deployment.yaml"
+  * **value**: "random-scheduler"
+
+* **ImagePatcher**: Changes the image of a kubernetes resource. Currently not fully implemented.
+  * **type**: "ImagePatcher"
+  * **resource**: "uc1-kstreams-deployment.yaml"
+  * **properties**:
+    * container: "uc-application"
+  * **value**: "dockerhubrepo/imagename"
+
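+For illustration, assuming integer division and the default `loadGenMaxRecords` of 15,000: a `NumSensors` load value of 150,000 yields (150,000 + 15,000 - 1) / 15,000 = 10 load generator replicas, while a `NumNestedGroups` load value of 5 with 4 sensors per group yields (4^5 + 15,000 - 1) / 15,000 = (1,024 + 14,999) / 15,000 = 1 replica.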
+
+
+## Experiment
+According to [our benchmarking method](https://doi.org/10.1016/j.bdr.2021.100209), the execution of a benchmark requires performing multiple **Experiments**. What is actually done within an experiment is a further level of detail. (But just for the sake of completeness: In an experiment, the benchmark implementation is deployed, load is generated according to the benchmark specification, some SLOs are monitored continuously, etc.)
+
+
+
diff --git a/execution/uc-application/aggregation-service.yaml b/theodolite-quarkus/config/aggregation-service.yaml
similarity index 86%
rename from execution/uc-application/aggregation-service.yaml
rename to theodolite-quarkus/config/aggregation-service.yaml
index 6317caf9fe624e42449b8f630d040a068709cda3..85432d04f225c30469f3232153ef6bd72bd02bdf 100644
--- a/execution/uc-application/aggregation-service.yaml
+++ b/theodolite-quarkus/config/aggregation-service.yaml
@@ -1,14 +1,14 @@
 apiVersion: v1
 kind: Service
-metadata:
+metadata:  
   name: titan-ccp-aggregation
   labels:
     app: titan-ccp-aggregation
 spec:
   #type: NodePort
-  selector:
+  selector:    
     app: titan-ccp-aggregation
-  ports:
+  ports:  
   - name: http
     port: 80
     targetPort: 80
diff --git a/theodolite-quarkus/config/example-execution-yaml-resource.yaml b/theodolite-quarkus/config/example-execution-yaml-resource.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e46a6cf417442b851650fe9699f73e1367dcc794
--- /dev/null
+++ b/theodolite-quarkus/config/example-execution-yaml-resource.yaml
@@ -0,0 +1,53 @@
+name: example-execution
+benchmark: "uc1-kstreams"
+load:
+  loadType: "NumSensors"
+  loadValues: [25000, 50000, 75000, 100000, 125000, 150000]
+resources:
+  resourceType: "Instances"
+  resourceValues: [1, 2, 3, 4, 5]
+slos:
+  - sloType: "lag trend"
+    threshold: 2000
+    prometheusUrl: "http://prometheus-operated:9090"
+    externalSloUrl: "http://localhost:80/evaluate-slope"
+    offset: 0
+    warmup: 60 # in seconds
+execution:
+  strategy: "LinearSearch"
+  duration: 300 # in seconds
+  repetitions: 1
+  loadGenerationDelay: 30 # in seconds, optional field, default is 0 seconds
+  restrictions:
+    - "LowerBound"
+configOverrides:
+  - patcher:
+      type: "NodeSelectorPatcher"
+      resource: "uc1-load-generator-deployment.yaml"
+      properties:
+        variableName: "env"
+    value: "prod"
+  - patcher:
+      type: "NodeSelectorPatcher"
+      resource: "uc1-kstreams-deployment.yaml"
+      properties:
+        variableName: "env"
+    value: "prod"
+  - patcher:
+      type: "ResourceLimitPatcher"
+      resource: "uc1-kstreams-deployment.yaml"
+      properties:
+        container: "uc-application"
+        limitedResource: "cpu"
+    value: "1000m"
+  - patcher:
+      type: "ResourceLimitPatcher"
+      resource: "uc1-kstreams-deployment.yaml"
+      properties:
+        container: "uc-application"
+        limitedResource: "memory"
+    value: "2Gi"
+#  - patcher:
+#      type: "SchedulerNamePatcher"
+#      resource: "uc1-kstreams-deployment.yaml"
+#    value: "random-scheduler"
diff --git a/theodolite-quarkus/config/example-operator-execution.yaml b/theodolite-quarkus/config/example-operator-execution.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9b2a1facbd8be3411407dfcf3cad39fd9f3de6b6
--- /dev/null
+++ b/theodolite-quarkus/config/example-operator-execution.yaml
@@ -0,0 +1,57 @@
+apiVersion: theodolite.com/v1
+kind: execution
+metadata:
+  name: example-execution
+spec:
+  benchmark: "uc1-kstreams"
+  load:
+    loadType: "NumSensors"
+    loadValues: [25000, 50000, 75000, 100000, 125000, 150000]
+  resources:
+    resourceType: "Instances"
+    resourceValues: [1, 2, 3, 4, 5]
+  slos:
+    - sloType: "lag trend"
+      threshold: 2000
+      prometheusUrl: "http://prometheus-operated:9090"
+      externalSloUrl: "http://localhost:80/evaluate-slope"
+      offset: 0
+      warmup: 60 # in seconds
+  execution:
+    strategy: "LinearSearch"
+    duration: 300 # in seconds
+    repetitions: 1
+    loadGenerationDelay: 30 # in seconds
+    restrictions:
+      - "LowerBound"
+  configOverrides:
+    # - patcher:
+    #     type: "NodeSelectorPatcher"
+    #     resource: "uc1-load-generator-deployment.yaml"
+    #     properties:
+    #       variableName: "env"
+    #     value: "prod"
+    # - patcher:
+    #     type: "NodeSelectorPatcher"
+    #     resource: "uc1-kstreams-deployment.yaml"
+    #     properties:
+    #       variableName: "env"
+    #   value: "prod"
+    # - patcher:
+    #     type: "ResourceLimitPatcher"
+    #     resource: "uc1-kstreams-deployment.yaml"
+    #     properties:
+    #       container: "uc-application"
+    #       limitedResource: "cpu"
+    #   value: "1000m"
+    # - patcher:
+    #     type: "ResourceLimitPatcher"
+    #     resource: "uc1-kstreams-deployment.yaml"
+    #     properties:
+    #       container: "uc-application"
+    #       limitedResource: "memory"
+    #   value: "2Gi"
+    #  - patcher:
+    #      type: "SchedulerNamePatcher"
+    #      resource: "uc1-kstreams-deployment.yaml"
+    #    value: "random-scheduler"
diff --git a/theodolite-quarkus/config/jmx-configmap.yaml b/theodolite-quarkus/config/jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..78496a86b1242a89b9e844ead3e700fd0b9a9667
--- /dev/null
+++ b/theodolite-quarkus/config/jmx-configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aggregation-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false
diff --git a/theodolite-quarkus/config/service-monitor.yaml b/theodolite-quarkus/config/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e7e758cacb5086305efa26292ddef2afc958096
--- /dev/null
+++ b/theodolite-quarkus/config/service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: titan-ccp-aggregation
+    appScope: titan-ccp
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+        app: titan-ccp-aggregation
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-quarkus/config/uc1-kstreams-deployment.yaml b/theodolite-quarkus/config/uc1-kstreams-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..171c3446db2719ee91bd8954233015316851fcf9
--- /dev/null
+++ b/theodolite-quarkus/config/uc1-kstreams-deployment.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/theodolite-quarkus/config/uc1-load-generator-deployment.yaml b/theodolite-quarkus/config/uc1-load-generator-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..374dd60113e133ef0a793149e3786efb38973287
--- /dev/null
+++ b/theodolite-quarkus/config/uc1-load-generator-deployment.yaml
@@ -0,0 +1,36 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-load-generator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: workload-generator
+          image: ghcr.io/cau-se/theodolite-uc1-workload-generator:latest
+          ports:
+            - containerPort: 5701
+              name: coordination
+          env:
+            - name: NUM_SENSORS
+              value: "25000"
+            - name: NUM_NESTED_GROUPS
+              value: "5"
+            - name: KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: KUBERNETES_DNS_NAME
+              value: "titan-ccp-load-generator.$(KUBERNETES_NAMESPACE).svc.cluster.local"
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "theodolite-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://theodolite-cp-schema-registry:8081"
diff --git a/theodolite-quarkus/config/uc1-load-generator-service.yaml b/theodolite-quarkus/config/uc1-load-generator-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f8b26b3f6dece427f9c1ad4db94e351b042749b3
--- /dev/null
+++ b/theodolite-quarkus/config/uc1-load-generator-service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-load-generator
+  labels:
+    app: titan-ccp-load-generator
+spec:
+  type: ClusterIP
+  clusterIP: None
+  selector:
+    app: titan-ccp-load-generator
+  ports:
+    - name: coordination
+      port: 5701
+      targetPort: 5701
+      protocol: TCP
diff --git a/theodolite-quarkus/config/uc1-service-monitor.yaml b/theodolite-quarkus/config/uc1-service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e7e758cacb5086305efa26292ddef2afc958096
--- /dev/null
+++ b/theodolite-quarkus/config/uc1-service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: titan-ccp-aggregation
+    appScope: titan-ccp
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+        app: titan-ccp-aggregation
+  endpoints:
+    - port: metrics
+      interval: 10s
diff --git a/theodolite-quarkus/crd/crd-benchmark.yaml b/theodolite-quarkus/crd/crd-benchmark.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b76821f6e7cca5408f604ba9bbf83cf1b43a37de
--- /dev/null
+++ b/theodolite-quarkus/crd/crd-benchmark.yaml
@@ -0,0 +1,117 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: benchmarks.theodolite.com
+spec:
+  group: theodolite.com
+  names:
+    kind: benchmark
+    plural: benchmarks
+    shortNames:
+      - bench
+  versions:
+  - name: v1
+    served: true
+    storage: true
+    schema:
+      openAPIV3Schema:
+        type: object
+        required: ["spec"]
+        properties:
+          spec:
+            type: object
+            required: []
+            properties:
+              name:
+                type: string
+              appResource:
+                type: array
+                minItems: 1
+                items:
+                  type: string
+              loadGenResource:
+                type: array
+                minItems: 1
+                items:
+                  type: string
+              resourceTypes:
+                type: array
+                minItems: 1
+                items:
+                  type: object
+                  properties:
+                    typeName:
+                      type: string
+                    patchers:
+                      type: array
+                      minItems: 1
+                      items:
+                        type: object
+                        properties:
+                          type:
+                            type: string
+                            default: ""
+                          resource:
+                            type: string
+                            default: ""
+                          properties:
+                            type: object
+                            additionalProperties: true
+                            x-kubernetes-map-type: "granular"
+                            default: {}
+              loadTypes:
+                type: array
+                minItems: 1
+                items:
+                  type: object
+                  properties:
+                    typeName:
+                      type: string
+                    patchers:
+                      type: array
+                      minItems: 1
+                      items:
+                        type: object
+                        properties:
+                          type:
+                            type: string
+                            default: ""
+                          resource:
+                            type: string
+                            default: ""
+                          properties:
+                            type: object
+                            additionalProperties: true
+                            x-kubernetes-map-type: "granular"
+                            default: {}
+              kafkaConfig:
+                type: object
+                properties:
+                  bootstrapServer:
+                    type: string
+                  topics:
+                    type: array
+                    minItems: 1
+                    items:
+                      type: object
+                      required: []
+                      properties:
+                        name:
+                          type: string
+                          default: ""
+                        numPartitions:
+                          type: integer
+                          default: 0
+                        replicationFactor:
+                          type: integer
+                          default: 0
+                        removeOnly:
+                          type: boolean
+                          default: false
+    additionalPrinterColumns:
+    - name: Age
+      type: date
+      jsonPath: .metadata.creationTimestamp
+    subresources:
+      status: {}
+  scope: Namespaced
\ No newline at end of file
diff --git a/theodolite-quarkus/crd/crd-execution.yaml b/theodolite-quarkus/crd/crd-execution.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b984f3ebe5ca7c8868adb9d3593e5d87d73fc2bd
--- /dev/null
+++ b/theodolite-quarkus/crd/crd-execution.yaml
@@ -0,0 +1,128 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: executions.theodolite.com
+spec:
+  group: theodolite.com
+  names:
+    kind: execution
+    plural: executions
+    shortNames:
+      - exec
+  versions:
+  - name: v1
+    served: true
+    storage: true
+    schema:
+      openAPIV3Schema:
+        type: object
+        required: ["spec"]
+        properties:
+          spec:
+            type: object
+            required: ["benchmark", "load", "resources", "slos", "execution", "configOverrides"]
+            properties:
+              name:
+                type: string
+                default: ""
+              benchmark:
+                type: string
+              load: # definition of the load dimension
+                type: object
+                required: ["loadType", "loadValues"]
+                properties:
+                  loadType:
+                    type: string
+                  loadValues:
+                    type: array
+                    items:
+                      type: integer
+              resources: # definition of the resource dimension
+                type: object
+                required: ["resourceType", "resourceValues"]
+                properties:
+                  resourceType:
+                    type: string
+                  resourceValues:
+                    type: array
+                    items:
+                      type: integer
+              slos: # definition of the service level objectives
+                type: array
+                items:
+                  type: object
+                  required: ["sloType", "threshold", "prometheusUrl", "externalSloUrl", "offset", "warmup"]
+                  properties:
+                    sloType:
+                      type: string
+                    threshold:
+                      type: integer
+                    prometheusUrl:
+                      type: string
+                    externalSloUrl:
+                      type: string
+                    offset:
+                      type: integer
+                    warmup:
+                      type: integer
+              execution: # definition of the execution configuration
+                type: object
+                required: ["strategy", "duration", "repetitions", "restrictions"]
+                properties:
+                  strategy:
+                    type: string
+                  duration:
+                    type: integer
+                  repetitions:
+                    type: integer
+                  loadGenerationDelay:
+                    type: integer
+                  restrictions:
+                    type: array
+                    items:
+                      type: string
+              configOverrides:
+                type: array
+                items:
+                  type: object
+                  properties:
+                    patcher:
+                      type: object
+                      properties:
+                        type:
+                          type: string
+                          default: ""
+                        resource:
+                          type: string
+                          default: ""
+                        properties:
+                          type: object
+                          additionalProperties: true
+                          x-kubernetes-map-type: "granular"
+                          default: {}
+                    value:
+                      type: string
+          status:
+            type: object
+            properties:
+              executionState:
+                description: "State of the execution"
+                type: string
+              executionDuration:
+                description: "Duration of the execution in seconds"
+                type: string
+    additionalPrinterColumns:
+    - name: STATUS
+      type: string
+      description: State of the execution
+      jsonPath: .status.executionState
+    - name: Duration
+      type: string
+      description: Duration of the execution
+      jsonPath: .status.executionDuration
+    - name: Age
+      type: date
+      jsonPath: .metadata.creationTimestamp
+    subresources:
+      status: {}
+  scope: Namespaced
\ No newline at end of file
diff --git a/theodolite-quarkus/examples/operator/example-benchmark.yaml b/theodolite-quarkus/examples/operator/example-benchmark.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..91d9f8f1f7dfed31d9edcb59947af4e832ca2843
--- /dev/null
+++ b/theodolite-quarkus/examples/operator/example-benchmark.yaml
@@ -0,0 +1,38 @@
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: uc1-kstreams
+spec:
+  appResource:
+    - "uc1-kstreams-deployment.yaml"
+    - "aggregation-service.yaml"
+    - "jmx-configmap.yaml"
+    - "uc1-service-monitor.yaml"
+  loadGenResource:
+    - "uc1-load-generator-deployment.yaml"
+    - "uc1-load-generator-service.yaml"
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "uc1-kstreams-deployment.yaml"
+  loadTypes:
+    - typeName: "NumSensors"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc1-load-generator-deployment.yaml"
+          properties:
+            variableName: "NUM_SENSORS"
+            container: "workload-generator"
+        - type: "NumSensorsLoadGeneratorReplicaPatcher"
+          resource: "uc1-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "15000"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "theodolite-.*"
+        removeOnly: true
\ No newline at end of file
diff --git a/theodolite-quarkus/examples/operator/example-execution.yaml b/theodolite-quarkus/examples/operator/example-execution.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5386fd7c8665e01302067da81c5dd4caf87fc602
--- /dev/null
+++ b/theodolite-quarkus/examples/operator/example-execution.yaml
@@ -0,0 +1,57 @@
+apiVersion: theodolite.com/v1
+kind: execution
+metadata:
+  name: theodolite-example-execution
+spec:
+  benchmark: "uc1-kstreams"
+  load:
+    loadType: "NumSensors"
+    loadValues: [25000, 50000, 75000, 100000, 125000, 150000]
+  resources:
+    resourceType: "Instances"
+    resourceValues: [1, 2, 3, 4, 5]
+  slos:
+    - sloType: "lag trend"
+      threshold: 2000
+      prometheusUrl: "http://prometheus-operated:9090"
+      externalSloUrl: "http://localhost:80/evaluate-slope"
+      offset: 0
+      warmup: 60 # in seconds
+  execution:
+    strategy: "LinearSearch"
+    duration: 300 # in seconds
+    repetitions: 1
+    loadGenerationDelay: 30 # in seconds
+    restrictions:
+      - "LowerBound"
+  configOverrides:
+  # - patcher:
+  #     type: "NodeSelectorPatcher"
+  #     resource: "uc1-load-generator-deployment.yaml"
+  #     properties:
+  #       variableName: "env"
+  #     value: "prod"
+  # - patcher:
+  #     type: "NodeSelectorPatcher"
+  #     resource: "uc1-kstreams-deployment.yaml"
+  #     properties:
+  #       variableName: "env"
+  #   value: "prod"
+  # - patcher:
+  #     type: "ResourceLimitPatcher"
+  #     resource: "uc1-kstreams-deployment.yaml"
+  #     properties:
+  #       container: "uc-application"
+  #       limitedResource: "cpu"
+  #   value: "1000m"
+  # - patcher:
+  #     type: "ResourceLimitPatcher"
+  #     resource: "uc1-kstreams-deployment.yaml"
+  #     properties:
+  #       container: "uc-application"
+  #       limitedResource: "memory"
+  #   value: "2Gi"
+  #  - patcher:
+  #      type: "SchedulerNamePatcher"
+  #      resource: "uc1-kstreams-deployment.yaml"
+  #    value: "random-scheduler"
diff --git a/theodolite-quarkus/examples/standalone/example-benchmark.yaml b/theodolite-quarkus/examples/standalone/example-benchmark.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..83edce93834ca9b8eef5606c1e5884ce40bdd7d8
--- /dev/null
+++ b/theodolite-quarkus/examples/standalone/example-benchmark.yaml
@@ -0,0 +1,34 @@
+name: "uc1-kstreams"
+appResource:
+  - "uc1-kstreams-deployment.yaml"
+  - "aggregation-service.yaml"
+  - "jmx-configmap.yaml"
+  - "uc1-service-monitor.yaml"
+loadGenResource:
+  - "uc1-load-generator-deployment.yaml"
+  - "uc1-load-generator-service.yaml"
+resourceTypes:
+  - typeName: "Instances"
+    patchers:
+      - type: "ReplicaPatcher"
+        resource: "uc1-kstreams-deployment.yaml"
+loadTypes:
+  - typeName: "NumSensors"
+    patchers:
+      - type: "EnvVarPatcher"
+        resource: "uc1-load-generator-deployment.yaml"
+        properties:
+          variableName: "NUM_SENSORS"
+          container: "workload-generator"
+      - type: "NumSensorsLoadGeneratorReplicaPatcher"
+        resource: "uc1-load-generator-deployment.yaml"
+        properties:
+          loadGenMaxRecords: "15000"
+kafkaConfig:
+  bootstrapServer: "localhost:31290"
+  topics:
+    - name: "input"
+      numPartitions: 40
+      replicationFactor: 1
+    - name: "theodolite-.*"
+      removeOnly: true
diff --git a/theodolite-quarkus/examples/standalone/example-execution.yaml b/theodolite-quarkus/examples/standalone/example-execution.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..24b2b7f32e803553a4a13b76869ccf4cf3f6e5a5
--- /dev/null
+++ b/theodolite-quarkus/examples/standalone/example-execution.yaml
@@ -0,0 +1,23 @@
+name: example-execution
+benchmark: "uc1-kstreams"
+load:
+  loadType: "NumSensors"
+  loadValues: [25000, 50000, 75000, 100000, 125000, 150000]
+resources:
+  resourceType: "Instances"
+  resourceValues: [1, 2, 3, 4, 5]
+slos:
+  - sloType: "lag trend"
+    threshold: 2000
+    prometheusUrl: "http://prometheus-operated:9090"
+    externalSloUrl: "http://localhost:80/evaluate-slope"
+    offset: 0
+    warmup: 60 # in seconds
+execution:
+  strategy: "LinearSearch"
+  duration: 300 # in seconds
+  repetitions: 1
+  loadGenerationDelay: 30 # in seconds, optional field, default is 0 seconds
+  restrictions:
+    - "LowerBound"
+configOverrides: []
\ No newline at end of file
diff --git a/theodolite-quarkus/gradle.properties b/theodolite-quarkus/gradle.properties
new file mode 100644
index 0000000000000000000000000000000000000000..d7e4187c25e76dfb440650274b2d383f75a32242
--- /dev/null
+++ b/theodolite-quarkus/gradle.properties
@@ -0,0 +1,8 @@
+#Gradle properties
+quarkusPluginId=io.quarkus
+quarkusPluginVersion=1.10.3.Final
+quarkusPlatformGroupId=io.quarkus
+quarkusPlatformArtifactId=quarkus-universe-bom
+quarkusPlatformVersion=1.10.3.Final
+
+org.gradle.logging.level=INFO
\ No newline at end of file
diff --git a/theodolite-quarkus/gradle/wrapper/gradle-wrapper.jar b/theodolite-quarkus/gradle/wrapper/gradle-wrapper.jar
new file mode 100644
index 0000000000000000000000000000000000000000..62d4c053550b91381bbd28b1afc82d634bf73a8a
Binary files /dev/null and b/theodolite-quarkus/gradle/wrapper/gradle-wrapper.jar differ
diff --git a/theodolite-quarkus/gradle/wrapper/gradle-wrapper.properties b/theodolite-quarkus/gradle/wrapper/gradle-wrapper.properties
new file mode 100644
index 0000000000000000000000000000000000000000..bb8b2fc26b2e572c79d7212a4f6f11057c6787f7
--- /dev/null
+++ b/theodolite-quarkus/gradle/wrapper/gradle-wrapper.properties
@@ -0,0 +1,5 @@
+distributionBase=GRADLE_USER_HOME
+distributionPath=wrapper/dists
+distributionUrl=https\://services.gradle.org/distributions/gradle-6.5.1-bin.zip
+zipStoreBase=GRADLE_USER_HOME
+zipStorePath=wrapper/dists
diff --git a/theodolite-quarkus/gradlew b/theodolite-quarkus/gradlew
new file mode 100755
index 0000000000000000000000000000000000000000..fbd7c515832dab7b01092e80db76e5e03fe32d29
--- /dev/null
+++ b/theodolite-quarkus/gradlew
@@ -0,0 +1,185 @@
+#!/usr/bin/env sh
+
+#
+# Copyright 2015 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+##############################################################################
+##
+##  Gradle start up script for UN*X
+##
+##############################################################################
+
+# Attempt to set APP_HOME
+# Resolve links: $0 may be a link
+PRG="$0"
+# Need this for relative symlinks.
+while [ -h "$PRG" ] ; do
+    ls=`ls -ld "$PRG"`
+    link=`expr "$ls" : '.*-> \(.*\)$'`
+    if expr "$link" : '/.*' > /dev/null; then
+        PRG="$link"
+    else
+        PRG=`dirname "$PRG"`"/$link"
+    fi
+done
+SAVED="`pwd`"
+cd "`dirname \"$PRG\"`/" >/dev/null
+APP_HOME="`pwd -P`"
+cd "$SAVED" >/dev/null
+
+APP_NAME="Gradle"
+APP_BASE_NAME=`basename "$0"`
+
+# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
+
+# Use the maximum available, or set MAX_FD != -1 to use that value.
+MAX_FD="maximum"
+
+warn () {
+    echo "$*"
+}
+
+die () {
+    echo
+    echo "$*"
+    echo
+    exit 1
+}
+
+# OS specific support (must be 'true' or 'false').
+cygwin=false
+msys=false
+darwin=false
+nonstop=false
+case "`uname`" in
+  CYGWIN* )
+    cygwin=true
+    ;;
+  Darwin* )
+    darwin=true
+    ;;
+  MINGW* )
+    msys=true
+    ;;
+  NONSTOP* )
+    nonstop=true
+    ;;
+esac
+
+CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
+
+
+# Determine the Java command to use to start the JVM.
+if [ -n "$JAVA_HOME" ] ; then
+    if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
+        # IBM's JDK on AIX uses strange locations for the executables
+        JAVACMD="$JAVA_HOME/jre/sh/java"
+    else
+        JAVACMD="$JAVA_HOME/bin/java"
+    fi
+    if [ ! -x "$JAVACMD" ] ; then
+        die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+    fi
+else
+    JAVACMD="java"
+    which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+
+Please set the JAVA_HOME variable in your environment to match the
+location of your Java installation."
+fi
+
+# Increase the maximum file descriptors if we can.
+if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then
+    MAX_FD_LIMIT=`ulimit -H -n`
+    if [ $? -eq 0 ] ; then
+        if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then
+            MAX_FD="$MAX_FD_LIMIT"
+        fi
+        ulimit -n $MAX_FD
+        if [ $? -ne 0 ] ; then
+            warn "Could not set maximum file descriptor limit: $MAX_FD"
+        fi
+    else
+        warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT"
+    fi
+fi
+
+# For Darwin, add options to specify how the application appears in the dock
+if $darwin; then
+    GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\""
+fi
+
+# For Cygwin or MSYS, switch paths to Windows format before running java
+if [ "$cygwin" = "true" -o "$msys" = "true" ] ; then
+    APP_HOME=`cygpath --path --mixed "$APP_HOME"`
+    CLASSPATH=`cygpath --path --mixed "$CLASSPATH"`
+    
+    JAVACMD=`cygpath --unix "$JAVACMD"`
+
+    # We build the pattern for arguments to be converted via cygpath
+    ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null`
+    SEP=""
+    for dir in $ROOTDIRSRAW ; do
+        ROOTDIRS="$ROOTDIRS$SEP$dir"
+        SEP="|"
+    done
+    OURCYGPATTERN="(^($ROOTDIRS))"
+    # Add a user-defined pattern to the cygpath arguments
+    if [ "$GRADLE_CYGPATTERN" != "" ] ; then
+        OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)"
+    fi
+    # Now convert the arguments - kludge to limit ourselves to /bin/sh
+    i=0
+    for arg in "$@" ; do
+        CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -`
+        CHECK2=`echo "$arg"|egrep -c "^-"`                                 ### Determine if an option
+
+        if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then                    ### Added a condition
+            eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"`
+        else
+            eval `echo args$i`="\"$arg\""
+        fi
+        i=`expr $i + 1`
+    done
+    case $i in
+        0) set -- ;;
+        1) set -- "$args0" ;;
+        2) set -- "$args0" "$args1" ;;
+        3) set -- "$args0" "$args1" "$args2" ;;
+        4) set -- "$args0" "$args1" "$args2" "$args3" ;;
+        5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;;
+        6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;;
+        7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;;
+        8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;;
+        9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;;
+    esac
+fi
+
+# Escape application args
+save () {
+    for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done
+    echo " "
+}
+APP_ARGS=`save "$@"`
+
+# Collect all arguments for the java command, following the shell quoting and substitution rules
+eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS"
+
+exec "$JAVACMD" "$@"
diff --git a/theodolite-quarkus/gradlew.bat b/theodolite-quarkus/gradlew.bat
new file mode 100755
index 0000000000000000000000000000000000000000..a9f778a7a964b6f01c904ee667903f005d6df556
--- /dev/null
+++ b/theodolite-quarkus/gradlew.bat
@@ -0,0 +1,104 @@
+@rem
+@rem Copyright 2015 the original author or authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem      https://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+
+@if "%DEBUG%" == "" @echo off
+@rem ##########################################################################
+@rem
+@rem  Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+set DIRNAME=%~dp0
+if "%DIRNAME%" == "" set DIRNAME=.
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Resolve any "." and ".." in APP_HOME to make it shorter.
+for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if "%ERRORLEVEL%" == "0" goto init
+
+echo.
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto init
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:init
+@rem Get command-line arguments, handling Windows variants
+
+if not "%OS%" == "Windows_NT" goto win9xME_args
+
+:win9xME_args
+@rem Slurp the command line arguments.
+set CMD_LINE_ARGS=
+set _SKIP=2
+
+:win9xME_args_slurp
+if "x%~1" == "x" goto execute
+
+set CMD_LINE_ARGS=%*
+
+:execute
+@rem Setup the command line
+
+set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
+
+
+@rem Execute Gradle
+"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS%
+
+:end
+@rem End local scope for the variables with windows NT shell
+if "%ERRORLEVEL%"=="0" goto mainEnd
+
+:fail
+rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
+rem the _cmd.exe /c_ return code!
+if  not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
+exit /b 1
+
+:mainEnd
+if "%OS%"=="Windows_NT" endlocal
+
+:omega
diff --git a/theodolite-quarkus/settings.gradle b/theodolite-quarkus/settings.gradle
new file mode 100644
index 0000000000000000000000000000000000000000..8a0af4a6504623f11c275c06e538537726935255
--- /dev/null
+++ b/theodolite-quarkus/settings.gradle
@@ -0,0 +1,11 @@
+pluginManagement {
+    repositories {
+        mavenLocal()
+        mavenCentral()
+        gradlePluginPortal()
+    }
+    plugins {
+        id "${quarkusPluginId}" version "${quarkusPluginVersion}"
+    }
+}
+rootProject.name='theodolite-quarkus'
diff --git a/theodolite-quarkus/src/main/docker/Dockerfile.fast-jar b/theodolite-quarkus/src/main/docker/Dockerfile.fast-jar
new file mode 100644
index 0000000000000000000000000000000000000000..32a4443a5d0291c8190acf8e59419d3d4825b2fd
--- /dev/null
+++ b/theodolite-quarkus/src/main/docker/Dockerfile.fast-jar
@@ -0,0 +1,54 @@
+####
+# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
+#
+# Before building the container image run:
+#
+# ./gradlew build -Dquarkus.package.type=fast-jar
+#
+# Then, build the image with:
+#
+# docker build -f src/main/docker/Dockerfile.fast-jar -t quarkus/theodolite-quarkus-fast-jar .
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 quarkus/theodolite-quarkus-fast-jar
+#
+# If you want to include the debug port into your docker image,
+# you will have to expose the debug port (default 5005) like this: EXPOSE 8080 5005
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 -p 5005:5005 -e JAVA_ENABLE_DEBUG="true" quarkus/theodolite-quarkus-fast-jar
+#
+###
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3 
+
+ARG JAVA_PACKAGE=java-11-openjdk-headless
+ARG RUN_JAVA_VERSION=1.3.8
+ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en'
+# Install java and the run-java script
+# Also set up permissions for user `1001`
+RUN microdnf install curl ca-certificates ${JAVA_PACKAGE} \
+    && microdnf update \
+    && microdnf clean all \
+    && mkdir /deployments \
+    && chown 1001 /deployments \
+    && chmod "g+rwX" /deployments \
+    && chown 1001:root /deployments \
+    && curl https://repo1.maven.org/maven2/io/fabric8/run-java-sh/${RUN_JAVA_VERSION}/run-java-sh-${RUN_JAVA_VERSION}-sh.sh -o /deployments/run-java.sh \
+    && chown 1001 /deployments/run-java.sh \
+    && chmod 540 /deployments/run-java.sh \
+    && echo "securerandom.source=file:/dev/urandom" >> /etc/alternatives/jre/lib/security/java.security
+
+# Configure the JAVA_OPTIONS, you can add -XshowSettings:vm to also display the heap size.
+ENV JAVA_OPTIONS="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
+# We make four distinct layers so if there are application changes the library layers can be re-used
+COPY --chown=1001 build/quarkus-app/lib/ /deployments/lib/
+COPY --chown=1001 build/quarkus-app/*.jar /deployments/
+COPY --chown=1001 build/quarkus-app/app/ /deployments/app/
+COPY --chown=1001 build/quarkus-app/quarkus/ /deployments/quarkus/
+
+EXPOSE 8080
+USER 1001
+
+ENTRYPOINT [ "/deployments/run-java.sh" ]
diff --git a/theodolite-quarkus/src/main/docker/Dockerfile.jvm b/theodolite-quarkus/src/main/docker/Dockerfile.jvm
new file mode 100644
index 0000000000000000000000000000000000000000..6733d5d441e8292e02547cf59131c706575e9d86
--- /dev/null
+++ b/theodolite-quarkus/src/main/docker/Dockerfile.jvm
@@ -0,0 +1,52 @@
+####
+# This Dockerfile is used in order to build a container that runs the Quarkus application in JVM mode
+#
+# Before building the container image run:
+#
+# ./gradlew build
+#
+# Then, build the image with:
+#
+# docker build -f src/main/docker/Dockerfile.jvm -t quarkus/theodolite-quarkus-jvm .
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 quarkus/theodolite-quarkus-jvm
+#
+# If you want to include the debug port into your docker image,
+# you will have to expose the debug port (default 5005) like this: EXPOSE 8080 5005
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 -p 5005:5005 -e JAVA_ENABLE_DEBUG="true" quarkus/theodolite-quarkus-jvm
+#
+###
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3 
+
+ARG JAVA_PACKAGE=java-11-openjdk-headless
+ARG RUN_JAVA_VERSION=1.3.8
+ENV LANG='en_US.UTF-8' LANGUAGE='en_US:en'
+# Install java and the run-java script
+# Also set up permissions for user `1001`
+RUN microdnf install curl ca-certificates ${JAVA_PACKAGE} \
+    && microdnf update \
+    && microdnf clean all \
+    && mkdir /deployments \
+    && chown 1001 /deployments \
+    && chmod "g+rwX" /deployments \
+    && chown 1001:root /deployments \
+    && curl https://repo1.maven.org/maven2/io/fabric8/run-java-sh/${RUN_JAVA_VERSION}/run-java-sh-${RUN_JAVA_VERSION}-sh.sh -o /deployments/run-java.sh \
+    && chown 1001 /deployments/run-java.sh \
+    && chmod 540 /deployments/run-java.sh \
+    && echo "securerandom.source=file:/dev/urandom" >> /etc/alternatives/jre/lib/security/java.security
+
+# Configure the JAVA_OPTIONS, you can add -XshowSettings:vm to also display the heap size.
+ENV JAVA_OPTIONS="-Dquarkus.http.host=0.0.0.0 -Djava.util.logging.manager=org.jboss.logmanager.LogManager"
+COPY build/lib/* /deployments/lib/
+COPY build/*-runner.jar /deployments/app.jar
+COPY config/ /deployments/config/
+
+EXPOSE 8080
+USER 1001
+
+ENTRYPOINT [ "/deployments/run-java.sh" ]
diff --git a/theodolite-quarkus/src/main/docker/Dockerfile.native b/theodolite-quarkus/src/main/docker/Dockerfile.native
new file mode 100644
index 0000000000000000000000000000000000000000..29836a7148b573c3051c33341718b06008fa07e2
--- /dev/null
+++ b/theodolite-quarkus/src/main/docker/Dockerfile.native
@@ -0,0 +1,28 @@
+####
+# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode
+#
+# Before building the container image run:
+#
+# ./gradlew build -Dquarkus.package.type=native
+#
+# Then, build the image with:
+#
+# docker build -f src/main/docker/Dockerfile.native -t quarkus/theodolite-quarkus .
+#
+# Then run the container using:
+#
+# docker run -i --rm -p 8080:8080 quarkus/theodolite-quarkus
+#
+###
+FROM registry.access.redhat.com/ubi8/ubi-minimal:8.3
+WORKDIR /deployments
+RUN chown 1001 /deployments \
+    && chmod "g+rwX" /deployments \
+    && chown 1001:root /deployments
+COPY --chown=1001:root build/*-runner /deployments/application
+COPY config/ /deployments/config/
+
+EXPOSE 8080
+USER 1001
+
+CMD ["./application", "-Dquarkus.http.host=0.0.0.0"]
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/Benchmark.kt b/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/Benchmark.kt
new file mode 100644
index 0000000000000000000000000000000000000000..d57a28e8bbcf4dc101e4814ecaa0d52fe28c08a9
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/Benchmark.kt
@@ -0,0 +1,31 @@
+package theodolite.benchmark
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Namespaced
+import io.fabric8.kubernetes.client.CustomResource
+import io.quarkus.runtime.annotations.RegisterForReflection
+import theodolite.util.ConfigurationOverride
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+/**
+ * A Benchmark contains:
+ * - The [Resource]s that can be scaled for the benchmark.
+ * - The [LoadDimension]s that can be scaled for the benchmark.
+ * - Additional [ConfigurationOverride]s.
+ */
+@RegisterForReflection
+interface Benchmark {
+
+    /**
+     * Builds a Deployment that can be deployed.
+     * @return a BenchmarkDeployment.
+     */
+    fun buildDeployment(
+        load: LoadDimension,
+        res: Resource,
+        configurationOverrides: List<ConfigurationOverride?>,
+        loadGenerationDelay: Long,
+        afterTeardownDelay: Long
+    ): BenchmarkDeployment
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/BenchmarkDeployment.kt b/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/BenchmarkDeployment.kt
new file mode 100644
index 0000000000000000000000000000000000000000..92d3f7a012517895fc61531026e4ea4f3e3cfb50
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/BenchmarkDeployment.kt
@@ -0,0 +1,20 @@
+package theodolite.benchmark
+
+/**
+ *  A BenchmarkDeployment contains the necessary infrastructure to execute a benchmark.
+ *  It therefore provides the capabilities to set up the deployment of a benchmark and to tear it down.
+ */
+interface BenchmarkDeployment {
+
+    /**
+     * Sets up a benchmark. This method is responsible for deploying the resources
+     * and organizing the needed infrastructure.
+     */
+    fun setup()
+
+    /**
+     *  Tears down a benchmark. This method is responsible for deleting the deployed
+     *  resources and for resetting the used infrastructure.
+     */
+    fun teardown()
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/BenchmarkExecution.kt b/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/BenchmarkExecution.kt
new file mode 100644
index 0000000000000000000000000000000000000000..62ab75898d16ff2732ab6aa5c254ec8f87fb7266
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/BenchmarkExecution.kt
@@ -0,0 +1,92 @@
+package theodolite.benchmark
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.quarkus.runtime.annotations.RegisterForReflection
+import theodolite.util.ConfigurationOverride
+import kotlin.properties.Delegates
+
+/**
+ * This class represents the configuration for an execution of a benchmark.
+ * An example for this is the BenchmarkExecution.yaml
+ * A BenchmarkExecution consists of:
+ *  - A [name].
+ *  - The [benchmark] that should be executed.
+ *  - The [load] that should be checked in the benchmark.
+ *  - The [resources] that should be checked in the benchmark.
+ *  - A list of [slos] that are used for the evaluation of the experiments.
+ *  - An [execution] that encapsulates: the strategy, the duration, and the restrictions
+ *  for the execution of the benchmark.
+ *  - [configOverrides] additional configurations.
+ *  This class is used for parsing (in [theodolite.execution.TheodoliteYamlExecutor]) and
+ *  for deserialization in the [theodolite.execution.operator.TheodoliteOperator].
+ *  @constructor Constructs an empty BenchmarkExecution.
+ */
+@JsonDeserialize
+@RegisterForReflection
+class BenchmarkExecution : KubernetesResource {
+    var executionId: Int = 0
+    lateinit var name: String
+    lateinit var benchmark: String
+    lateinit var load: LoadDefinition
+    lateinit var resources: ResourceDefinition
+    lateinit var slos: List<Slo>
+    lateinit var execution: Execution
+    lateinit var configOverrides: MutableList<ConfigurationOverride?>
+
+    /**
+     * This execution encapsulates the [strategy], the [duration], the [repetitions], and the [restrictions]
+     *  which are used for the concrete benchmark experiments.
+     */
+    @JsonDeserialize
+    @RegisterForReflection
+    class Execution : KubernetesResource {
+        lateinit var strategy: String
+        var duration by Delegates.notNull<Long>()
+        var repetitions by Delegates.notNull<Int>()
+        lateinit var restrictions: List<String>
+        var loadGenerationDelay = 0L
+        var afterTeardownDelay = 5L
+    }
+
+    /**
+     * Measurable metric.
+     * [sloType] determines the type of the metric.
+     * It is evaluated using the [theodolite.evaluation.ExternalSloChecker] with data measured by Prometheus.
+     * The evaluation checks whether the [threshold] is exceeded or not.
+     * [offset] determines the shift in hours by which the start and end timestamps should be shifted.
+     * The [warmup] determines after which time the metric should be evaluated, to avoid the influence of startup effects.
+     * The [warmup] time unit depends on the SLO: for the lag trend it is in seconds.
+     */
+    @JsonDeserialize
+    @RegisterForReflection
+    class Slo : KubernetesResource {
+        lateinit var sloType: String
+        var threshold by Delegates.notNull<Int>()
+        lateinit var prometheusUrl: String
+        lateinit var externalSloUrl: String
+        var offset by Delegates.notNull<Int>()
+        var warmup by Delegates.notNull<Int>()
+    }
+
+    /**
+     * Represents the load that should be generated and checked.
+     * It consists of a [loadType] and the [loadValues] to be tested.
+     */
+    @JsonDeserialize
+    @RegisterForReflection
+    class LoadDefinition : KubernetesResource {
+        lateinit var loadType: String
+        lateinit var loadValues: List<Int>
+    }
+
+    /**
+     * Represents a resource that can be scaled to [resourceValues].
+     */
+    @JsonDeserialize
+    @RegisterForReflection
+    class ResourceDefinition : KubernetesResource {
+        lateinit var resourceType: String
+        lateinit var resourceValues: List<Int>
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/KafkaLagExporterRemover.kt b/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/KafkaLagExporterRemover.kt
new file mode 100644
index 0000000000000000000000000000000000000000..e8179b42d40e40e7ed45a8f5c48fe26f235be334
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/KafkaLagExporterRemover.kt
@@ -0,0 +1,22 @@
+package theodolite.benchmark
+
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import mu.KotlinLogging
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * Used to reset the KafkaLagExporter by deleting the pod.
+ * @param client NamespacedKubernetesClient used for the deletion.
+ */
+class KafkaLagExporterRemover(private val client: NamespacedKubernetesClient) {
+
+    /**
+     * Deletes all pods with the selected label.
+     * @param label Label of the pods that should be deleted.
+     */
+    fun remove(label: String) {
+        this.client.pods().withLabel(label).delete()
+        logger.info { "Pod with label: $label deleted" }
+    }
+}
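+
+// Hypothetical usage sketch (not part of the original class): this mirrors how the remover
+// is invoked during teardown in KubernetesBenchmarkDeployment, assuming `client` is a
+// NamespacedKubernetesClient for the benchmark namespace.
+//
+//   KafkaLagExporterRemover(client).remove("app.kubernetes.io/name=kafka-lag-exporter")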
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/KubernetesBenchmark.kt b/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/KubernetesBenchmark.kt
new file mode 100644
index 0000000000000000000000000000000000000000..aa9c36ad912437e3b104dccf6ff1f4dea5905946
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/KubernetesBenchmark.kt
@@ -0,0 +1,113 @@
+package theodolite.benchmark
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Namespaced
+import io.fabric8.kubernetes.client.CustomResource
+import io.fabric8.kubernetes.client.DefaultKubernetesClient
+import io.quarkus.runtime.annotations.RegisterForReflection
+import mu.KotlinLogging
+import theodolite.k8s.K8sResourceLoader
+import theodolite.patcher.PatcherFactory
+import theodolite.util.*
+
+private val logger = KotlinLogging.logger {}
+
+private var DEFAULT_NAMESPACE = "default"
+
+/**
+ * Represents a benchmark in Kubernetes. An example for this is the BenchmarkType.yaml
+ * It consists of:
+ * - [name] of the benchmark,
+ * - [appResource] list of the resources that have to be deployed for the benchmark,
+ * - [loadGenResource] resource that generates the load,
+ * - [resourceTypes] types of scaling resources,
+ * - [loadTypes] types of loads that can be scaled for the benchmark,
+ * - [kafkaConfig] for the [theodolite.k8s.TopicManager],
+ * - [namespace] for the client,
+ * - [path] under which the resource yamls can be found.
+ *
+ *  This class is used for parsing (in the [theodolite.execution.TheodoliteYamlExecutor]) and
+ *  for deserialization in the [theodolite.execution.operator.TheodoliteOperator].
+ * @constructor Constructs an empty Benchmark.
+ */
+@JsonDeserialize
+@RegisterForReflection
+class KubernetesBenchmark : KubernetesResource, Benchmark {
+    lateinit var name: String
+    lateinit var appResource: List<String>
+    lateinit var loadGenResource: List<String>
+    lateinit var resourceTypes: List<TypeName>
+    lateinit var loadTypes: List<TypeName>
+    lateinit var kafkaConfig: KafkaConfig
+    var namespace = System.getenv("NAMESPACE") ?: DEFAULT_NAMESPACE
+    var path = System.getenv("THEODOLITE_APP_RESOURCES") ?: "./config"
+
+
+    /**
+     * Loads [KubernetesResource]s.
+     * It first loads them via the [YamlParser] to check for their concrete type and afterwards initializes them using
+     * the [K8sResourceLoader].
+     */
+    private fun loadKubernetesResources(resources: List<String>): List<Pair<String, KubernetesResource>> {
+        val parser = YamlParser()
+        val loader = K8sResourceLoader(DefaultKubernetesClient().inNamespace(namespace))
+        return resources
+            .map { resource ->
+                val resourcePath = "$path/$resource"
+                val kind = parser.parse(resourcePath, HashMap<String, String>()::class.java)?.get("kind")!!
+                val k8sResource = loader.loadK8sResource(kind, resourcePath)
+                Pair(resource, k8sResource)
+            }
+    }
+
+    /**
+     * Builds a deployment.
+     * First loads all required resources and then patches them to the concrete load and resources for the experiment.
+     * Afterwards, it patches additional configurations (cluster-dependent) into the resources.
+     * @param load concrete load that will be benchmarked in this experiment.
+     * @param res concrete resource that will be scaled for this experiment.
+     * @param configurationOverrides additional configurations that are patched into the resources.
+     * @return a [BenchmarkDeployment]
+     */
+    override fun buildDeployment(
+        load: LoadDimension,
+        res: Resource,
+        configurationOverrides: List<ConfigurationOverride?>,
+        loadGenerationDelay: Long,
+        afterTeardownDelay: Long
+    ): BenchmarkDeployment {
+        logger.info { "Using $namespace as namespace." }
+        logger.info { "Using $path as resource path." }
+
+        val appResources = loadKubernetesResources(this.appResource)
+        val loadGenResources = loadKubernetesResources(this.loadGenResource)
+
+        val patcherFactory = PatcherFactory()
+
+        // Patch the load dimension into the resources
+        load.getType().forEach { patcherDefinition ->
+            patcherFactory.createPatcher(patcherDefinition, loadGenResources).patch(load.get().toString())
+        }
+        res.getType().forEach { patcherDefinition ->
+            patcherFactory.createPatcher(patcherDefinition, appResources).patch(res.get().toString())
+        }
+
+        // Patch the given overrides
+        configurationOverrides.forEach { override ->
+            override?.let {
+                patcherFactory.createPatcher(it.patcher, appResources + loadGenResources).patch(override.value)
+            }
+        }
+        return KubernetesBenchmarkDeployment(
+            namespace = namespace,
+            appResources = appResources.map { it.second },
+            loadGenResources = loadGenResources.map { it.second },
+            loadGenerationDelay = loadGenerationDelay,
+            afterTeardownDelay = afterTeardownDelay,
+            kafkaConfig = hashMapOf("bootstrap.servers" to kafkaConfig.bootstrapServer),
+            topics = kafkaConfig.topics,
+            client = DefaultKubernetesClient().inNamespace(namespace)
+        )
+    }
+}
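+
+// Hypothetical usage sketch (assumption, not part of the original sources): an executor could
+// build a deployment from a parsed benchmark as follows. `load`, `res` and `overrides` are
+// assumed to come from a BenchmarkExecution; the delay values mirror the example execution
+// (loadGenerationDelay: 30) and the default afterTeardownDelay of 5 seconds.
+//
+//   val deployment = benchmark.buildDeployment(
+//       load = load,
+//       res = res,
+//       configurationOverrides = overrides,
+//       loadGenerationDelay = 30L,   // seconds
+//       afterTeardownDelay = 5L      // seconds
+//   )
+//   deployment.setup()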
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/KubernetesBenchmarkDeployment.kt b/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/KubernetesBenchmarkDeployment.kt
new file mode 100644
index 0000000000000000000000000000000000000000..6cf239676ddb24752f4754a85fc62657f9eb6603
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/benchmark/KubernetesBenchmarkDeployment.kt
@@ -0,0 +1,67 @@
+package theodolite.benchmark
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.quarkus.runtime.annotations.RegisterForReflection
+import mu.KotlinLogging
+import org.apache.kafka.clients.admin.NewTopic
+import theodolite.k8s.K8sManager
+import theodolite.k8s.TopicManager
+import theodolite.util.KafkaConfig
+import java.time.Duration
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * Organizes the deployment of benchmarks in Kubernetes.
+ *
+ * @param namespace to operate in.
+ * @param appResources List of [KubernetesResource]s that are deployed for the benchmark application.
+ * @param loadGenResources List of [KubernetesResource]s that are deployed for the load generator.
+ * @param kafkaConfig for the organization of Kafka topics.
+ * @param topics List of topics that are created or deleted.
+ */
+@RegisterForReflection
+class KubernetesBenchmarkDeployment(
+    val namespace: String,
+    val appResources: List<KubernetesResource>,
+    val loadGenResources: List<KubernetesResource>,
+    private val loadGenerationDelay: Long,
+    private val afterTeardownDelay: Long,
+    private val kafkaConfig: HashMap<String, Any>,
+    private val topics: List<KafkaConfig.TopicWrapper>,
+    private val client: NamespacedKubernetesClient
+) : BenchmarkDeployment {
+    private val kafkaController = TopicManager(this.kafkaConfig)
+    private val kubernetesManager = K8sManager(client)
+    private val LAG_EXPORTER_POD_LABEL = "app.kubernetes.io/name=kafka-lag-exporter"
+
+    /**
+     * Sets up a [KubernetesBenchmark] using the [TopicManager] and the [K8sManager]:
+     *  - Create the needed topics.
+     *  - Deploy the needed resources.
+     */
+    override fun setup() {
+        val kafkaTopics = this.topics.filter { !it.removeOnly }
+            .map { NewTopic(it.name, it.numPartitions, it.replicationFactor) }
+        kafkaController.createTopics(kafkaTopics)
+        appResources.forEach { kubernetesManager.deploy(it) }
+        logger.info { "Wait ${this.loadGenerationDelay} seconds before starting the load generator." }
+        Thread.sleep(Duration.ofSeconds(this.loadGenerationDelay).toMillis())
+        loadGenResources.forEach { kubernetesManager.deploy(it) }
+    }
+
+    /**
+     * Tears down a [KubernetesBenchmark]:
+     *  - Reset the Kafka Lag Exporter.
+     *  - Remove the used topics.
+     *  - Remove the [KubernetesResource]s.
+     */
+    override fun teardown() {
+        loadGenResources.forEach { kubernetesManager.remove(it) }
+        appResources.forEach { kubernetesManager.remove(it) }
+        kafkaController.removeTopics(this.topics.map { topic -> topic.name })
+        KafkaLagExporterRemover(client).remove(LAG_EXPORTER_POD_LABEL)
+        logger.info { "Teardown complete. Wait $afterTeardownDelay seconds to let everything come down." }
+        Thread.sleep(Duration.ofSeconds(afterTeardownDelay).toMillis())
+    }
+}
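+
+// Sketch of the intended lifecycle (assumption derived from the methods above): setup() is
+// called before an experiment and teardown() afterwards; wrapping the experiment in
+// try/finally is an illustrative choice, not necessarily what the executor does.
+//
+//   deployment.setup()
+//   try {
+//       // run and observe the experiment for the configured duration
+//   } finally {
+//       deployment.teardown()
+//   }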
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/AnalysisExecutor.kt b/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/AnalysisExecutor.kt
new file mode 100644
index 0000000000000000000000000000000000000000..ef4d371173c7099eb091f90cddbe26d31e6522be
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/AnalysisExecutor.kt
@@ -0,0 +1,84 @@
+package theodolite.evaluation
+
+import mu.KotlinLogging
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.util.IOHandler
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import java.text.Normalizer
+import java.time.Duration
+import java.time.Instant
+import java.util.*
+import java.util.regex.Pattern
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * Contains the analysis. Fetches a metric from Prometheus, documents it, and evaluates it.
+ * @param slo SLO that is used for the analysis.
+ * @param executionId ID of the execution to which the analysed experiment belongs.
+ */
+class AnalysisExecutor(
+    private val slo: BenchmarkExecution.Slo,
+    private val executionId: Int
+) {
+
+    private val fetcher = MetricFetcher(
+        prometheusURL = slo.prometheusUrl,
+        offset = Duration.ofHours(slo.offset.toLong())
+    )
+
+    /**
+     *  Analyses an experiment via Prometheus data.
+     *  It first fetches the data from Prometheus, then documents it, and afterwards evaluates it via the [slo].
+     *  @param load of the experiment.
+     *  @param res of the experiment.
+     *  @param executionIntervals list of start and end points of the repetitions of the experiment.
+     *  @return true if the experiment succeeded.
+     */
+    fun analyze(load: LoadDimension, res: Resource, executionIntervals: List<Pair<Instant, Instant>>): Boolean {
+        var result = false
+        var repetitionCounter = 1
+
+        try {
+            val ioHandler = IOHandler()
+            val resultsFolder: String = ioHandler.getResultFolderURL()
+            val fileURL = "${resultsFolder}exp${executionId}_${load.get()}_${res.get()}_${slo.sloType.toSlug()}"
+
+            val prometheusData = executionIntervals
+                .map { interval -> fetcher.fetchMetric(
+                        start = interval.first,
+                        end = interval.second,
+                        query = "sum by(group)(kafka_consumergroup_group_lag >= 0)") }
+
+            prometheusData.forEach { data ->
+                ioHandler.writeToCSVFile(
+                    fileURL = "${fileURL}_${repetitionCounter++}",
+                    data = data.getResultAsList(),
+                    columns = listOf("group", "timestamp", "value"))
+            }
+
+            val sloChecker = SloCheckerFactory().create(
+                sloType = slo.sloType,
+                externalSlopeURL = slo.externalSloUrl,
+                threshold = slo.threshold,
+                warmup = slo.warmup
+            )
+
+            result = sloChecker.evaluate(prometheusData)
+
+        } catch (e: Exception) {
+            logger.error { "Evaluation failed for resource '${res.get()}' and load '${load.get()}'. Error: $e" }
+        }
+        return result
+    }
+
+    private val NONLATIN: Pattern = Pattern.compile("[^\\w-]")
+    private val WHITESPACE: Pattern = Pattern.compile("[\\s]")
+
+    fun String.toSlug(): String {
+        val noWhitespace: String = WHITESPACE.matcher(this).replaceAll("-")
+        val normalized: String = Normalizer.normalize(noWhitespace, Normalizer.Form.NFD)
+        val slug: String = NONLATIN.matcher(normalized).replaceAll("")
+        return slug.toLowerCase(Locale.ENGLISH)
+    }
+}
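+
+// Hypothetical usage sketch (assumption): this mirrors the call in BenchmarkExecutorImpl,
+// where all repetition intervals of one experiment are analysed together.
+//
+//   val success = AnalysisExecutor(slo = slo, executionId = executionId)
+//       .analyze(load = load, res = res, executionIntervals = executionIntervals)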
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/ExternalSloChecker.kt b/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/ExternalSloChecker.kt
new file mode 100644
index 0000000000000000000000000000000000000000..f7ebee8faf740583dbe6a37381a599e9bde19280
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/ExternalSloChecker.kt
@@ -0,0 +1,59 @@
+package theodolite.evaluation
+
+import com.google.gson.Gson
+import khttp.post
+import mu.KotlinLogging
+import theodolite.util.PrometheusResponse
+import java.net.ConnectException
+import java.time.Instant
+
+/**
+ * [SloChecker] that uses an external source for the concrete evaluation.
+ * @param externalSlopeURL The URL under which the external evaluation can be reached.
+ * @param threshold Threshold that must not be exceeded for the evaluation to return true.
+ * @param warmup Warm-up time that is not taken into consideration for the evaluation.
+ */
+class ExternalSloChecker(
+    private val externalSlopeURL: String,
+    private val threshold: Int,
+    private val warmup: Int
+) : SloChecker {
+
+    private val RETRIES = 2
+    private val TIMEOUT = 60.0
+
+    private val logger = KotlinLogging.logger {}
+
+    /**
+     * Evaluates an experiment using an external service.
+     * Tries to reach the external service until success, at most [RETRIES] times.
+     * Each request will time out after [TIMEOUT].
+     *
+     * @param fetchedData Prometheus data that should be evaluated.
+     * @return true if the experiment was successful (i.e., the threshold was not exceeded).
+     * @throws ConnectException if the external service could not be reached.
+     */
+    override fun evaluate(fetchedData: List<PrometheusResponse>): Boolean {
+        var counter = 0
+        val data = Gson().toJson(mapOf(
+            "total_lags" to fetchedData.map { it.data?.result},
+            "threshold" to threshold,
+            "warmup" to warmup))
+
+        while (counter < RETRIES) {
+            val result = post(externalSlopeURL, data = data, timeout = TIMEOUT)
+            if (result.statusCode != 200) {
+                counter++
+                logger.error { "Could not reach external SLO checker" }
+            } else {
+                val booleanResult = result.text.toBoolean()
+                logger.info { "SLO checker result is: $booleanResult" }
+                return booleanResult
+            }
+        }
+
+        throw ConnectException("Could not reach external SLO checker")
+    }
+}
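+
+// For illustration only (assumption about the wire format, derived from the Gson call above):
+// the checker POSTs a JSON body of roughly this shape to the external SLO service and expects
+// a plain "true"/"false" response body. Threshold and warmup values are taken from the
+// example execution.
+//
+//   {
+//     "total_lags": [ <Prometheus result arrays, one per repetition> ],
+//     "threshold": 2000,
+//     "warmup": 60
+//   }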
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/MetricFetcher.kt b/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/MetricFetcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..833d7d1e16c2fbc91b58817b319a7d02af7f5b2b
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/MetricFetcher.kt
@@ -0,0 +1,76 @@
+package theodolite.evaluation
+
+import com.google.gson.Gson
+import khttp.get
+import khttp.responses.Response
+import mu.KotlinLogging
+import theodolite.util.PrometheusResponse
+import java.net.ConnectException
+import java.time.Duration
+import java.time.Instant
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * Used to fetch metrics from Prometheus.
+ * @param prometheusURL URL to the Prometheus server.
+ * @param offset Duration by which the start and end points of the queries
+ * are shifted (e.g., to account for different time zones).
+ */
+class MetricFetcher(private val prometheusURL: String, private val offset: Duration) {
+    private val RETRIES = 2
+    private val TIMEOUT = 60.0
+
+    /**
+     * Tries to fetch a metric by a query to a Prometheus server.
+     * Retries to fetch the metric [RETRIES] times.
+     * Connects to the server via [prometheusURL].
+     *
+     * @param start start point of the query.
+     * @param end end point of the query.
+     * @param query query for the Prometheus server.
+     * @return the fetched data as a [PrometheusResponse].
+     * @throws ConnectException if the Prometheus server timed out or was not reached.
+     */
+    fun fetchMetric(start: Instant, end: Instant, query: String): PrometheusResponse {
+
+        val offsetStart = start.minus(offset)
+        val offsetEnd = end.minus(offset)
+
+        var counter = 0
+        val parameter = mapOf(
+            "query" to query,
+            "start" to offsetStart.toString(),
+            "end" to offsetEnd.toString(),
+            "step" to "5s"
+        )
+
+        while (counter < RETRIES) {
+            val response = get("$prometheusURL/api/v1/query_range", params = parameter, timeout = TIMEOUT)
+            if (response.statusCode != 200) {
+                val message = response.jsonObject.toString()
+                logger.warn { "Could not connect to Prometheus: $message. Retrying now." }
+                counter++
+            } else {
+                val values = parseValues(response)
+                if (values.data?.result.isNullOrEmpty()) {
+                    logger.error { "Empty query result: $values between $start and $end for query $query." }
+                    throw NoSuchFieldException()
+                }
+                return parseValues(response)
+            }
+        }
+        throw ConnectException("No answer from Prometheus received.")
+    }
+
+    /**
+     * Deserializes a response from Prometheus.
+     * @param values Response from Prometheus.
+     * @return a [PrometheusResponse]
+     */
+    private fun parseValues(values: Response): PrometheusResponse {
+        return Gson().fromJson<PrometheusResponse>(
+            values.jsonObject.toString(),
+            PrometheusResponse::class.java
+        )
+    }
+}
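+
+// Hypothetical usage sketch (assumption): this mirrors how AnalysisExecutor uses the fetcher,
+// querying the consumer group lag between the start and end of one repetition. The Prometheus
+// URL is the value from the example execution.
+//
+//   val fetcher = MetricFetcher(
+//       prometheusURL = "http://prometheus-operated:9090",
+//       offset = Duration.ofHours(0)
+//   )
+//   val response = fetcher.fetchMetric(
+//       start = interval.first,
+//       end = interval.second,
+//       query = "sum by(group)(kafka_consumergroup_group_lag >= 0)"
+//   )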
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/SloChecker.kt b/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/SloChecker.kt
new file mode 100644
index 0000000000000000000000000000000000000000..9ee5fe7ef34ce5b6214882ce2c1d19677f1d7130
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/SloChecker.kt
@@ -0,0 +1,20 @@
+package theodolite.evaluation
+
+import theodolite.util.PrometheusResponse
+
+/**
+ * A SloChecker can be used to evaluate data from Prometheus.
+ * @constructor Creates an empty SloChecker
+ */
+interface SloChecker {
+    /**
+     * Evaluates [fetchedData] and returns whether the experiment was successful.
+     *
+     * @param fetchedData Prometheus data that will be evaluated.
+     * @return true if the experiment was successful, otherwise false.
+     */
+    fun evaluate(fetchedData: List<PrometheusResponse>): Boolean
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/SloCheckerFactory.kt b/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/SloCheckerFactory.kt
new file mode 100644
index 0000000000000000000000000000000000000000..20c421acdfcd76f5d2ebc2ab2c30142bcca3841a
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/evaluation/SloCheckerFactory.kt
@@ -0,0 +1,36 @@
+package theodolite.evaluation
+
+/**
+ * Factory used to create different [SloChecker]s.
+ * Currently supported: the "lag trend" type.
+ */
+class SloCheckerFactory {
+
+    /**
+     * Creates different [SloChecker]s.
+     * Currently supported: the "lag trend" type.
+     *
+     * @param sloType Type of the [SloChecker].
+     * @param externalSlopeURL URL under which the external SLO checker can be reached.
+     * @param threshold for the [SloChecker].
+     * @param warmup for the [SloChecker].
+     *
+     * @return A [SloChecker]
+     * @throws IllegalArgumentException If [sloType] not supported.
+     */
+    fun create(
+        sloType: String,
+        externalSlopeURL: String,
+        threshold: Int,
+        warmup: Int
+    ): SloChecker {
+        return when (sloType) {
+            "lag trend" -> ExternalSloChecker(
+                externalSlopeURL = externalSlopeURL,
+                threshold = threshold,
+                warmup = warmup
+            )
+            else -> throw IllegalArgumentException("SLO type $sloType not found.")
+        }
+    }
+}
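+
+// Hypothetical usage sketch (assumption): how a checker could be obtained and used for one
+// evaluation. The parameter values are taken from the example execution files.
+//
+//   val checker = SloCheckerFactory().create(
+//       sloType = "lag trend",
+//       externalSlopeURL = "http://localhost:80/evaluate-slope",
+//       threshold = 2000,
+//       warmup = 60
+//   )
+//   val success = checker.evaluate(prometheusData)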
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/execution/BenchmarkExecutor.kt b/theodolite-quarkus/src/main/kotlin/theodolite/execution/BenchmarkExecutor.kt
new file mode 100644
index 0000000000000000000000000000000000000000..e7b511d8c83b5abccece1204aad2a4a9ecfdfd26
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/execution/BenchmarkExecutor.kt
@@ -0,0 +1,69 @@
+package theodolite.execution
+
+import mu.KotlinLogging
+import theodolite.benchmark.Benchmark
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.util.ConfigurationOverride
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import theodolite.util.Results
+import java.time.Duration
+import java.util.concurrent.atomic.AtomicBoolean
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * The BenchmarkExecutor runs a single experiment.
+ *
+ * @property benchmark The benchmark that is executed.
+ * @property results Object in which the results of the experiments are stored.
+ * @property executionDuration Duration of a single experiment.
+ * @constructor Creates an empty BenchmarkExecutor.
+ */
+abstract class BenchmarkExecutor(
+    val benchmark: Benchmark,
+    val results: Results,
+    val executionDuration: Duration,
+    val configurationOverrides: List<ConfigurationOverride?>,
+    val slo: BenchmarkExecution.Slo,
+    val repetitions: Int,
+    val executionId: Int,
+    val loadGenerationDelay: Long,
+    val afterTeardownDelay: Long
+) {
+
+    var run: AtomicBoolean = AtomicBoolean(true)
+
+    /**
+     * Run an experiment for the given parametrization, evaluate the
+     * experiment, and save the result.
+     *
+     * @param load the load to be tested.
+     * @param res the resources to be tested.
+     * @return true if the given resources are sufficient for the
+     *     given load, false otherwise.
+     */
+    abstract fun runExperiment(load: LoadDimension, res: Resource): Boolean
+
+    /**
+     * Wait while the benchmark is running and log the elapsed time once per minute.
+     */
+    fun waitAndLog() {
+        logger.info { "Execution of a new experiment started." }
+
+        var secondsRunning = 0L
+
+        while (run.get() && secondsRunning < executionDuration.toSeconds()) {
+            secondsRunning++
+            Thread.sleep(Duration.ofSeconds(1).toMillis())
+
+            if ((secondsRunning % 60) == 0L) {
+                logger.info { "Executed: ${secondsRunning / 60} minutes." }
+            }
+        }
+
+        logger.debug { "Executor shutdown gracefully." }
+
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/execution/BenchmarkExecutorImpl.kt b/theodolite-quarkus/src/main/kotlin/theodolite/execution/BenchmarkExecutorImpl.kt
new file mode 100644
index 0000000000000000000000000000000000000000..3afc85f0a8cb67011763498a662b447ce2c07f0f
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/execution/BenchmarkExecutorImpl.kt
@@ -0,0 +1,73 @@
+package theodolite.execution
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+import mu.KotlinLogging
+import theodolite.benchmark.Benchmark
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.evaluation.AnalysisExecutor
+import theodolite.util.*
+import java.time.Duration
+import java.time.Instant
+
+private val logger = KotlinLogging.logger {}
+
+@RegisterForReflection
+class BenchmarkExecutorImpl(
+    benchmark: Benchmark,
+    results: Results,
+    executionDuration: Duration,
+    configurationOverrides: List<ConfigurationOverride?>,
+    slo: BenchmarkExecution.Slo,
+    repetitions: Int,
+    executionId: Int,
+    loadGenerationDelay: Long,
+    afterTeardownDelay: Long
+) : BenchmarkExecutor(benchmark, results, executionDuration, configurationOverrides, slo, repetitions, executionId, loadGenerationDelay, afterTeardownDelay) {
+    override fun runExperiment(load: LoadDimension, res: Resource): Boolean {
+        var result = false
+        val executionIntervals: MutableList<Pair<Instant, Instant>> = ArrayList()
+
+        for (i in 1.rangeTo(repetitions)) {
+            logger.info { "Run repetition $i/$repetitions" }
+            if (this.run.get()) {
+                executionIntervals.add(runSingleExperiment(load, res))
+            } else {
+                break
+            }
+        }
+
+        /**
+         * Analyze the experiment only if [run] is still true; otherwise the user canceled the experiment.
+         */
+        if (this.run.get()) {
+            result = AnalysisExecutor(slo = slo, executionId = executionId)
+                    .analyze(
+                        load = load,
+                        res = res,
+                        executionIntervals = executionIntervals)
+            this.results.setResult(Pair(load, res), result)
+        }
+        return result
+    }
+
+    private fun runSingleExperiment(load: LoadDimension, res: Resource): Pair<Instant, Instant> {
+        val benchmarkDeployment = benchmark.buildDeployment(load, res, this.configurationOverrides, this.loadGenerationDelay, this.afterTeardownDelay)
+        val from = Instant.now()
+        try {
+            benchmarkDeployment.setup()
+            this.waitAndLog()
+        } catch (e: Exception) {
+            logger.error { "Error during experiment setup." }
+            logger.error { "Error is: $e" }
+            this.run.set(false)
+        }
+        val to = Instant.now()
+        try {
+            benchmarkDeployment.teardown()
+        } catch (e: Exception) {
+            logger.warn { "Error while tearing down the benchmark deployment." }
+            logger.debug { "Teardown failed, caused by: $e" }
+        }
+        return Pair(from, to)
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/execution/Main.kt b/theodolite-quarkus/src/main/kotlin/theodolite/execution/Main.kt
new file mode 100644
index 0000000000000000000000000000000000000000..bf883529967a8b24229fe8256ba0e4edd11b342c
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/execution/Main.kt
@@ -0,0 +1,29 @@
+package theodolite.execution
+
+import io.quarkus.runtime.annotations.QuarkusMain
+import mu.KotlinLogging
+import theodolite.execution.operator.TheodoliteOperator
+import kotlin.system.exitProcess
+
+private val logger = KotlinLogging.logger {}
+
+@QuarkusMain
+object Main {
+
+    @JvmStatic
+    fun main(args: Array<String>) {
+
+        val mode = System.getenv("MODE") ?: "standalone"
+        logger.info { "Start Theodolite with mode $mode" }
+
+        when (mode) {
+            "standalone" -> TheodoliteYamlExecutor().start()
+            "yaml-executor" -> TheodoliteYamlExecutor().start() // TODO remove (#209)
+            "operator" -> TheodoliteOperator().start()
+            else -> {
+                logger.error { "MODE $mode not found" }
+                exitProcess(1)
+            }
+        }
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/execution/Shutdown.kt b/theodolite-quarkus/src/main/kotlin/theodolite/execution/Shutdown.kt
new file mode 100644
index 0000000000000000000000000000000000000000..0ff8379a0af4b11154214dde021d7c60609631d1
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/execution/Shutdown.kt
@@ -0,0 +1,46 @@
+package theodolite.execution
+
+import mu.KotlinLogging
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.benchmark.KubernetesBenchmark
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import java.lang.Exception
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * This shutdown hook deletes all Kubernetes resources that are related to the given execution and benchmark.
+ *
+ * @property benchmarkExecution the execution whose resources are deleted.
+ * @property benchmark the benchmark whose resources are deleted.
+ */
+class Shutdown(private val benchmarkExecution: BenchmarkExecution, private val benchmark: KubernetesBenchmark) :
+    Thread() {
+
+    /**
+     * Deletes all Kubernetes resources that are related to the execution and the benchmark.
+     */
+    override fun run() {
+        // Build the configuration required for the teardown
+        try {
+            logger.info { "Received shutdown signal -> shutting down" }
+            val deployment =
+                benchmark.buildDeployment(
+                    load = LoadDimension(0, emptyList()),
+                    res = Resource(0, emptyList()),
+                    configurationOverrides = benchmarkExecution.configOverrides,
+                    loadGenerationDelay = 0L,
+                    afterTeardownDelay = 5L
+                )
+            deployment.teardown()
+        } catch (e: Exception) {
+            logger.warn {
+                "Could not delete all specified resources from Kubernetes. " +
+                    "This can happen if not all resources were deployed or running."
+            }
+        }
+        logger.info { "Teardown completed" }
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/execution/TheodoliteExecutor.kt b/theodolite-quarkus/src/main/kotlin/theodolite/execution/TheodoliteExecutor.kt
new file mode 100644
index 0000000000000000000000000000000000000000..c73aaae08489c25a40163d4edb1607247fae010a
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/execution/TheodoliteExecutor.kt
@@ -0,0 +1,141 @@
+package theodolite.execution
+
+import mu.KotlinLogging
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.benchmark.KubernetesBenchmark
+import theodolite.patcher.PatcherDefinitionFactory
+import theodolite.strategies.StrategyFactory
+import theodolite.strategies.searchstrategy.CompositeStrategy
+import theodolite.util.*
+import java.io.File
+import java.time.Duration
+
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * The Theodolite executor runs all experiments defined by the given execution and benchmark configuration.
+ *
+ * @property config the configuration of an execution.
+ * @property kubernetesBenchmark the configuration of a benchmark.
+ */
+class TheodoliteExecutor(
+    private val config: BenchmarkExecution,
+    private val kubernetesBenchmark: KubernetesBenchmark
+) {
+    /**
+     * An executor object, configured with the specified benchmark, evaluation method, experiment duration
+     * and overrides which are given in the execution.
+     */
+    lateinit var executor: BenchmarkExecutor
+
+    /**
+     * Creates all required components to start Theodolite.
+     *
+     * @return a [Config] that contains a list of [LoadDimension]s,
+     *          a list of [Resource]s, and the [CompositeStrategy].
+     * The [CompositeStrategy] is configured to find the minimum number of resources required for a given load.
+     */
+    private fun buildConfig(): Config {
+        val results = Results()
+        val strategyFactory = StrategyFactory()
+
+        val executionDuration = Duration.ofSeconds(config.execution.duration)
+
+        val resourcePatcherDefinition =
+            PatcherDefinitionFactory().createPatcherDefinition(
+                config.resources.resourceType,
+                this.kubernetesBenchmark.resourceTypes
+            )
+
+        val loadDimensionPatcherDefinition =
+            PatcherDefinitionFactory().createPatcherDefinition(
+                config.load.loadType,
+                this.kubernetesBenchmark.loadTypes
+            )
+
+        executor =
+            BenchmarkExecutorImpl(
+                benchmark = kubernetesBenchmark,
+                results = results,
+                executionDuration = executionDuration,
+                configurationOverrides = config.configOverrides,
+                slo = config.slos[0],
+                repetitions = config.execution.repetitions,
+                executionId = config.executionId,
+                loadGenerationDelay = config.execution.loadGenerationDelay,
+                afterTeardownDelay = config.execution.afterTeardownDelay
+            )
+
+        if (config.load.loadValues != config.load.loadValues.sorted()) {
+            config.load.loadValues = config.load.loadValues.sorted()
+            logger.info {
+                "Load values are not sorted in ascending order, Theodolite sorts them. " +
+                    "New order is: ${config.load.loadValues}"
+            }
+        }
+
+        if (config.resources.resourceValues != config.resources.resourceValues.sorted()) {
+            config.resources.resourceValues = config.resources.resourceValues.sorted()
+            logger.info {
+                "Resource values are not sorted in ascending order, Theodolite sorts them. " +
+                    "New order is: ${config.resources.resourceValues}"
+            }
+        }
+
+        return Config(
+            loads = config.load.loadValues.map { load -> LoadDimension(load, loadDimensionPatcherDefinition) },
+            resources = config.resources.resourceValues.map { resource ->
+                Resource(
+                    resource,
+                    resourcePatcherDefinition
+                )
+            },
+            compositeStrategy = CompositeStrategy(
+                benchmarkExecutor = executor,
+                searchStrategy = strategyFactory.createSearchStrategy(executor, config.execution.strategy),
+                restrictionStrategies = strategyFactory.createRestrictionStrategy(
+                    results,
+                    config.execution.restrictions
+                )
+            )
+        )
+    }
+
+    fun getExecution(): BenchmarkExecution {
+        return this.config
+    }
+
+    fun getBenchmark(): KubernetesBenchmark {
+        return this.kubernetesBenchmark
+    }
+
+    /**
+     * Run all experiments which are specified in the corresponding
+     * execution and benchmark objects.
+     */
+    fun run() {
+        val ioHandler = IOHandler()
+        val resultsFolder = ioHandler.getResultFolderURL()
+        this.config.executionId = getAndIncrementExecutionID(resultsFolder+"expID.txt")
+        ioHandler.writeToJSONFile(this.config, "$resultsFolder${this.config.executionId}-execution-configuration")
+        ioHandler.writeToJSONFile(kubernetesBenchmark, "$resultsFolder${this.config.executionId}-benchmark-configuration")
+
+        val config = buildConfig()
+        // execute benchmarks for each load
+        for (load in config.loads) {
+            if (executor.run.get()) {
+                config.compositeStrategy.findSuitableResource(load, config.resources)
+            }
+        }
+        ioHandler.writeToJSONFile(config.compositeStrategy.benchmarkExecutor.results, "$resultsFolder${this.config.executionId}-result")
+    }
+
+    private fun getAndIncrementExecutionID(fileURL: String): Int {
+        val ioHandler = IOHandler()
+        var executionID = 0
+        if (File(fileURL).exists()) {
+            executionID = ioHandler.readFileAsString(fileURL).toInt() + 1
+        }
+        ioHandler.writeStringToTextFile(fileURL, (executionID).toString())
+        return executionID
+    }
+
+}
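
To illustrate the execution ID handling in getAndIncrementExecutionID, a short sketch of the counter behaviour (the file name expID.txt is the one used above):

    // 1st call: expID.txt does not exist -> returns 0 and writes "0".
    // 2nd call: expID.txt contains "0"   -> returns 1 and writes "1".
    // 3rd call: expID.txt contains "1"   -> returns 2 and writes "2".
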
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/execution/TheodoliteYamlExecutor.kt b/theodolite-quarkus/src/main/kotlin/theodolite/execution/TheodoliteYamlExecutor.kt
new file mode 100644
index 0000000000000000000000000000000000000000..b9977029703c8012ada7fb3d7766bfa321a836c3
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/execution/TheodoliteYamlExecutor.kt
@@ -0,0 +1,58 @@
+package theodolite.execution
+
+import mu.KotlinLogging
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.benchmark.KubernetesBenchmark
+import theodolite.util.YamlParser
+import kotlin.concurrent.thread
+import kotlin.system.exitProcess
+
+private val logger = KotlinLogging.logger {}
+
+
+/**
+ * The Theodolite YAML executor loads the execution and benchmark
+ * configurations from YAML files and runs the corresponding experiments.
+ *
+ * The locations of the execution, benchmark, and Kubernetes resource
+ * files can be configured via the following environment variables:
+ * `THEODOLITE_EXECUTION`
+ *
+ * `THEODOLITE_BENCHMARK`
+ *
+ * `THEODOLITE_APP_RESOURCES`
+ */
+class TheodoliteYamlExecutor {
+    private val parser = YamlParser()
+
+    fun start() {
+        logger.info { "Theodolite started" }
+
+        val executionPath = System.getenv("THEODOLITE_EXECUTION") ?: "./config/example-execution-yaml-resource.yaml"
+        val benchmarkPath = System.getenv("THEODOLITE_BENCHMARK") ?: "./config/example-benchmark-yaml-resource.yaml"
+
+        logger.info { "Using $executionPath for BenchmarkExecution" }
+        logger.info { "Using $benchmarkPath for BenchmarkType" }
+
+
+        // load the BenchmarkExecution and the BenchmarkType
+        val benchmarkExecution =
+            parser.parse(path = executionPath, E = BenchmarkExecution::class.java)!!
+        val benchmark =
+            parser.parse(path = benchmarkPath, E = KubernetesBenchmark::class.java)!!
+
+        // Add shutdown hook
+        // Use thread {} with start = false, otherwise the thread would start right away
+        val shutdown = thread(start = false) { Shutdown(benchmarkExecution, benchmark).run() }
+        Runtime.getRuntime().addShutdownHook(shutdown)
+
+        val executor = TheodoliteExecutor(benchmarkExecution, benchmark)
+        executor.run()
+        logger.info { "Theodolite finished" }
+        Runtime.getRuntime().removeShutdownHook(shutdown)
+        exitProcess(0)
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/AbstractStateHandler.kt b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/AbstractStateHandler.kt
new file mode 100644
index 0000000000000000000000000000000000000000..a7a40cd569f8034f3b8e062dad3031d5643a12e3
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/AbstractStateHandler.kt
@@ -0,0 +1,52 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.api.model.HasMetadata
+import io.fabric8.kubernetes.api.model.KubernetesResourceList
+import io.fabric8.kubernetes.api.model.Namespaced
+import io.fabric8.kubernetes.client.CustomResource
+import io.fabric8.kubernetes.client.KubernetesClient
+import io.fabric8.kubernetes.client.dsl.MixedOperation
+import io.fabric8.kubernetes.client.dsl.Resource
+import java.lang.Thread.sleep
+
+abstract class AbstractStateHandler<T, L, D>(
+    private val client: KubernetesClient,
+    private val crd: Class<T>,
+    private val crdList: Class<L>
+) : StateHandler<T> where T : CustomResource<*, *>?, T : HasMetadata, T : Namespaced, L : KubernetesResourceList<T> {
+
+    private val crdClient: MixedOperation<T, L, Resource<T>> =
+        this.client.customResources(this.crd, this.crdList)
+
+    @Synchronized
+    override fun setState(resourceName: String, f: (T) -> T?) {
+        this.crdClient
+            .inNamespace(this.client.namespace)
+            .list().items
+            .filter { item -> item.metadata.name == resourceName }
+            .map { customResource -> f(customResource) }
+            .forEach { this.crdClient.updateStatus(it) }
+    }
+
+    @Synchronized
+    override fun getState(resourceName: String, f: (T) -> String?): String? {
+        return this.crdClient
+            .inNamespace(this.client.namespace)
+            .list().items
+            .filter { item -> item.metadata.name == resourceName }
+            .map { customResource -> f(customResource) }
+            .firstOrNull()
+    }
+
+    @Synchronized
+    override fun blockUntilStateIsSet(resourceName: String, desiredStatusString: String, f: (T) -> String?, maxTries: Int): Boolean {
+        for (i in 0.rangeTo(maxTries)) {
+            val currentStatus = getState(resourceName, f)
+            if (currentStatus == desiredStatusString) {
+                return true
+            }
+            sleep(50)
+        }
+        return false
+    }
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/ClusterSetup.kt b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/ClusterSetup.kt
new file mode 100644
index 0000000000000000000000000000000000000000..8fc951d09598187bcaf4cb7e4a39d322be722792
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/ClusterSetup.kt
@@ -0,0 +1,76 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.dsl.MixedOperation
+import io.fabric8.kubernetes.client.dsl.Resource
+import mu.KotlinLogging
+import org.json.JSONObject
+import theodolite.execution.Shutdown
+import theodolite.k8s.K8sContextFactory
+import theodolite.model.crd.*
+
+private val logger = KotlinLogging.logger {}
+
+class ClusterSetup(
+    private val executionCRDClient: MixedOperation<ExecutionCRD, BenchmarkExecutionList, Resource<ExecutionCRD>>,
+    private val benchmarkCRDClient: MixedOperation<BenchmarkCRD, KubernetesBenchmarkList, Resource<BenchmarkCRD>>,
+    private val client: NamespacedKubernetesClient
+) {
+    private val serviceMonitorContext = K8sContextFactory().create(
+        api = "v1",
+        scope = "Namespaced",
+        group = "monitoring.coreos.com",
+        plural = "servicemonitors"
+    )
+
+    fun clearClusterState() {
+        stopRunningExecution()
+        clearByLabel()
+    }
+
+    private fun stopRunningExecution() {
+        executionCRDClient
+            .inNamespace(client.namespace)
+            .list()
+            .items
+            .asSequence()
+            .filter { it.status.executionState == States.RUNNING.value }
+            .forEach { execution ->
+                val benchmark = benchmarkCRDClient
+                    .inNamespace(client.namespace)
+                    .list()
+                    .items
+                    .firstOrNull { it.metadata.name == execution.spec.benchmark }
+
+                if (benchmark != null) {
+                    execution.spec.name = execution.metadata.name
+                    benchmark.spec.name = benchmark.metadata.name
+                    Shutdown(execution.spec, benchmark.spec).start()
+                } else {
+                    logger.error {
+                        "Execution with state ${States.RUNNING.value} was found, but no corresponding benchmark exists. " +
+                            "Could not clear the cluster state for this execution."
+                    }
+                }
+            }
+    }
+
+    private fun clearByLabel() {
+        this.client.services().withLabel("app.kubernetes.io/created-by=theodolite").delete()
+        this.client.apps().deployments().withLabel("app.kubernetes.io/created-by=theodolite").delete()
+        this.client.apps().statefulSets().withLabel("app.kubernetes.io/created-by=theodolite").delete()
+        this.client.configMaps().withLabel("app.kubernetes.io/created-by=theodolite").delete()
+
+        val serviceMonitors = JSONObject(
+            this.client.customResource(serviceMonitorContext)
+                .list(client.namespace, mapOf(Pair("app.kubernetes.io/created-by", "theodolite")))
+        )
+            .getJSONArray("items")
+
+        (0 until serviceMonitors.length())
+            .map { serviceMonitors.getJSONObject(it).getJSONObject("metadata").getString("name") }
+            .forEach { this.client.customResource(serviceMonitorContext).delete(client.namespace, it) }
+    }
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/ExecutionEventHandler.kt b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/ExecutionEventHandler.kt
new file mode 100644
index 0000000000000000000000000000000000000000..4168bd19b57216722ca5301d42ce5e0df3f6c192
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/ExecutionEventHandler.kt
@@ -0,0 +1,86 @@
+package theodolite.execution.operator
+
+import com.google.gson.Gson
+import com.google.gson.GsonBuilder
+import io.fabric8.kubernetes.client.informers.ResourceEventHandler
+import mu.KotlinLogging
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.model.crd.*
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * Handles adding, updating and deleting BenchmarkExecutions.
+ *
+ * @param controller The TheodoliteController that handles the application state
+ *
+ * @see TheodoliteController
+ * @see BenchmarkExecution
+ */
+class ExecutionHandler(
+    private val controller: TheodoliteController,
+    private val stateHandler: ExecutionStateHandler
+) : ResourceEventHandler<ExecutionCRD> {
+    private val gson: Gson = GsonBuilder().enableComplexMapKeySerialization().create()
+
+    /**
+     * Add an execution to the end of the queue of the TheodoliteController.
+     *
+     * @param execution the execution to add
+     */
+    @Synchronized
+    override fun onAdd(execution: ExecutionCRD) {
+        logger.info { "Add execution ${execution.metadata.name}" }
+        execution.spec.name = execution.metadata.name
+        when (this.stateHandler.getExecutionState(execution.metadata.name)) {
+            States.NO_STATE -> this.stateHandler.setExecutionState(execution.spec.name, States.PENDING)
+            States.RUNNING -> {
+                this.stateHandler.setExecutionState(execution.spec.name, States.RESTART)
+                if (this.controller.isExecutionRunning(execution.spec.name)) {
+                    this.controller.stop(restart = true)
+                }
+            }
+        }
+    }
+
+    /**
+     * Updates an execution. If this execution is running at the time this function is called, it is stopped and
+     * added to the beginning of the queue of the TheodoliteController.
+     * Otherwise, it is just added to the beginning of the queue.
+     *
+     * @param oldExecution the old execution
+     * @param newExecution the new execution
+     */
+    @Synchronized
+    override fun onUpdate(oldExecution: ExecutionCRD, newExecution: ExecutionCRD) {
+        logger.info { "Receive update event for execution ${oldExecution.metadata.name}" }
+        newExecution.spec.name = newExecution.metadata.name
+        oldExecution.spec.name = oldExecution.metadata.name
+        if (gson.toJson(oldExecution.spec) != gson.toJson(newExecution.spec)) {
+            when (this.stateHandler.getExecutionState(newExecution.metadata.name)) {
+                States.RUNNING -> {
+                    this.stateHandler.setExecutionState(newExecution.spec.name, States.RESTART)
+                    if (this.controller.isExecutionRunning(newExecution.spec.name)) {
+                        this.controller.stop(restart = true)
+                    }
+                }
+                States.RESTART -> {} // should this be set to pending?
+                else -> this.stateHandler.setExecutionState(newExecution.spec.name, States.PENDING)
+            }
+        }
+    }
+
+    /**
+     * Delete an execution from the queue of the TheodoliteController.
+     *
+     * @param execution the execution to delete
+     */
+    @Synchronized
+    override fun onDelete(execution: ExecutionCRD, b: Boolean) {
+        logger.info { "Delete execution ${execution.metadata.name}" }
+        if (execution.status.executionState == States.RUNNING.value &&
+            this.controller.isExecutionRunning(execution.spec.name)
+        ) {
+            this.controller.stop()
+        }
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/ExecutionStateHandler.kt b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/ExecutionStateHandler.kt
new file mode 100644
index 0000000000000000000000000000000000000000..df5e77695d1beb562408f1b5830f6f4353543c75
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/ExecutionStateHandler.kt
@@ -0,0 +1,81 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.client.KubernetesClient
+import theodolite.model.crd.BenchmarkExecutionList
+import theodolite.model.crd.ExecutionCRD
+import theodolite.model.crd.ExecutionStatus
+import theodolite.model.crd.States
+import java.lang.Thread.sleep
+import java.time.Duration
+import java.time.Instant
+import java.util.concurrent.atomic.AtomicBoolean
+
+class ExecutionStateHandler(val client: KubernetesClient) :
+    AbstractStateHandler<ExecutionCRD, BenchmarkExecutionList, ExecutionStatus>(
+        client = client,
+        crd = ExecutionCRD::class.java,
+        crdList = BenchmarkExecutionList::class.java
+    ) {
+
+    private var runExecutionDurationTimer: AtomicBoolean = AtomicBoolean(false)
+
+    private fun getExecutionLambda() = { cr: ExecutionCRD -> cr.status.executionState }
+
+    private fun getDurationLambda() = { cr: ExecutionCRD -> cr.status.executionDuration }
+
+    fun setExecutionState(resourceName: String, status: States): Boolean {
+        setState(resourceName) { cr -> cr.status.executionState = status.value; cr }
+        return blockUntilStateIsSet(resourceName, status.value, getExecutionLambda())
+    }
+
+    fun getExecutionState(resourceName: String): States {
+        val status = this.getState(resourceName, getExecutionLambda())
+        return if (status.isNullOrBlank()) {
+            States.NO_STATE
+        } else {
+            States.values().first { it.value == status }
+        }
+    }
+
+    fun setDurationState(resourceName: String, duration: Duration): Boolean {
+        setState(resourceName) { cr -> cr.status.executionDuration = durationToK8sString(duration); cr }
+        return blockUntilStateIsSet(resourceName, durationToK8sString(duration), getDurationLambda())
+    }
+
+    fun getDurationState(resourceName: String): String {
+        val status = getState(resourceName, getDurationLambda())
+        return if (status.isNullOrBlank()) {
+            "-"
+        } else {
+            status
+        }
+    }
+
+    private fun durationToK8sString(duration: Duration): String {
+        val sec = duration.seconds
+        return when {
+            sec <= 120 -> "${sec}s" // max 120s
+            sec < 60 * 99 -> "${duration.toMinutes()}m" // max 99m
+            sec < 60 * 60 * 99 -> "${duration.toHours()}h"   // max 99h
+            else -> "${duration.toDays()}d + ${duration.minusDays(duration.toDays()).toHours()}h"
+        }
+    }
+
+    fun startDurationStateTimer(resourceName: String) {
+        this.runExecutionDurationTimer.set(true)
+        val startTime = Instant.now().toEpochMilli()
+        Thread {
+            while (this.runExecutionDurationTimer.get()) {
+                val duration = Duration.ofMillis(Instant.now().minusMillis(startTime).toEpochMilli())
+                setDurationState(resourceName, duration)
+                sleep(100 * 1)
+            }
+        }.start()
+    }
+
+    @Synchronized
+    fun stopDurationStateTimer() {
+        this.runExecutionDurationTimer.set(false)
+        sleep(100 * 2)
+    }
+}
\ No newline at end of file
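
To illustrate the format produced by durationToK8sString above, a few sample durations and the strings they map to (derived from the when branches; purely illustrative):

    // Duration.ofSeconds(90) -> "90s"      (durations up to 120 seconds are shown in seconds)
    // Duration.ofMinutes(30) -> "30m"      (up to 99 minutes are shown in minutes)
    // Duration.ofHours(5)    -> "5h"       (up to 99 hours are shown in hours)
    // Duration.ofDays(5)     -> "5d + 0h"  (longer durations are shown as days plus remaining hours)
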
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/LeaderElector.kt b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/LeaderElector.kt
new file mode 100644
index 0000000000000000000000000000000000000000..9d093e4851e5c43d29a3fea3057ccf01be612e63
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/LeaderElector.kt
@@ -0,0 +1,43 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.client.DefaultKubernetesClient
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.extended.leaderelection.LeaderCallbacks
+import io.fabric8.kubernetes.client.extended.leaderelection.LeaderElectionConfigBuilder
+import io.fabric8.kubernetes.client.extended.leaderelection.resourcelock.LeaseLock
+import mu.KotlinLogging
+import java.time.Duration
+import java.util.*
+import kotlin.reflect.KFunction0
+
+private val logger = KotlinLogging.logger {}
+
+class LeaderElector(
+    val client: NamespacedKubernetesClient,
+    val name: String
+) {
+
+    fun getLeadership(leader: KFunction0<Unit>) {
+        val lockIdentity: String = UUID.randomUUID().toString()
+        DefaultKubernetesClient().use { kc ->
+            kc.leaderElector()
+                .withConfig(
+                    LeaderElectionConfigBuilder()
+                        .withName("Theodolite")
+                        .withLeaseDuration(Duration.ofSeconds(15L))
+                        .withLock(LeaseLock(client.namespace, name, lockIdentity))
+                        .withRenewDeadline(Duration.ofSeconds(10L))
+                        .withRetryPeriod(Duration.ofSeconds(2L))
+                        .withLeaderCallbacks(LeaderCallbacks(
+                            { Thread { leader() }.start() },
+                            { logger.info { "STOPPED LEADERSHIP" } }
+                        ) { newLeader: String? ->
+                            logger.info { "New leader elected $newLeader" }
+                        })
+                        .build()
+                )
+                .build().run()
+        }
+    }
+
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/StateHandler.kt b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/StateHandler.kt
new file mode 100644
index 0000000000000000000000000000000000000000..cefcf2ec97986375205205fd95ddcd2ff7eacf5a
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/StateHandler.kt
@@ -0,0 +1,14 @@
+package theodolite.execution.operator
+
+private const val MAX_TRIES: Int = 5
+
+interface StateHandler<T> {
+    fun setState(resourceName: String, f: (T) -> T?)
+    fun getState(resourceName: String, f: (T) -> String?): String?
+    fun blockUntilStateIsSet(
+        resourceName: String,
+        desiredStatusString: String,
+        f: (T) -> String?,
+        maxTries: Int = MAX_TRIES
+    ): Boolean
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/TheodoliteController.kt b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/TheodoliteController.kt
new file mode 100644
index 0000000000000000000000000000000000000000..c49810fc5838e63211b3a796e5d54085219418c8
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/TheodoliteController.kt
@@ -0,0 +1,195 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.dsl.MixedOperation
+import io.fabric8.kubernetes.client.dsl.Resource
+import mu.KotlinLogging
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.benchmark.KubernetesBenchmark
+import theodolite.execution.TheodoliteExecutor
+import theodolite.model.crd.*
+import theodolite.util.ConfigurationOverride
+import theodolite.util.PatcherDefinition
+import java.lang.Thread.sleep
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * The controller implementation for Theodolite. It reconciles the available
+ * executions and benchmarks and runs one execution at a time.
+ *
+ * @see NamespacedKubernetesClient
+ * @see BenchmarkExecution
+ * @see KubernetesBenchmark
+ */
+
+class TheodoliteController(
+    private val namespace: String,
+    val path: String,
+    private val executionCRDClient: MixedOperation<ExecutionCRD, BenchmarkExecutionList, Resource<ExecutionCRD>>,
+    private val benchmarkCRDClient: MixedOperation<BenchmarkCRD, KubernetesBenchmarkList, Resource<BenchmarkCRD>>,
+    private val executionStateHandler: ExecutionStateHandler
+) {
+    lateinit var executor: TheodoliteExecutor
+
+    /**
+     * Runs the TheodoliteController forever.
+     */
+    fun run() {
+        sleep(5000) // wait until all states are correctly set
+        while (true) {
+            reconcile()
+            sleep(2000)
+        }
+    }
+
+    private fun reconcile() {
+        do {
+            val execution = getNextExecution()
+            if (execution != null) {
+                val benchmark = getBenchmarks()
+                    .firstOrNull { it.name == execution.benchmark }
+                if (benchmark != null) {
+                    runExecution(execution, benchmark)
+                }
+            } else {
+                logger.info { "No executable execution found." }
+            }
+        } while (execution != null)
+    }
+
+    /**
+     * Execute a benchmark with a defined KubernetesBenchmark and BenchmarkExecution
+     *
+     * @see BenchmarkExecution
+     */
+    private fun runExecution(execution: BenchmarkExecution, benchmark: KubernetesBenchmark) {
+        setAdditionalLabels(execution.name,
+            "deployed-for-execution",
+            benchmark.appResource + benchmark.loadGenResource,
+            execution)
+        setAdditionalLabels(benchmark.name,
+            "deployed-for-benchmark",
+            benchmark.appResource + benchmark.loadGenResource,
+            execution)
+        setAdditionalLabels("theodolite",
+            "app.kubernetes.io/created-by",
+            benchmark.appResource + benchmark.loadGenResource,
+            execution)
+
+        executionStateHandler.setExecutionState(execution.name, States.RUNNING)
+        executionStateHandler.startDurationStateTimer(execution.name)
+
+        try {
+            executor = TheodoliteExecutor(execution, benchmark)
+            executor.run()
+            when (executionStateHandler.getExecutionState(execution.name)) {
+                States.RESTART -> runExecution(execution, benchmark)
+                States.RUNNING -> {
+                    executionStateHandler.setExecutionState(execution.name, States.FINISHED)
+                    logger.info { "Execution of ${execution.name} has finished." }
+                }
+            }
+        } catch (e: Exception) {
+            logger.error { "Failure while executing execution ${execution.name} with benchmark ${benchmark.name}." }
+            logger.error { "Problem is: $e" }
+            executionStateHandler.setExecutionState(execution.name, States.FAILURE)
+        }
+        executionStateHandler.stopDurationStateTimer()
+    }
+
+    @Synchronized
+    fun stop(restart: Boolean = false) {
+        if (!::executor.isInitialized) return
+        if (restart) {
+            executionStateHandler.setExecutionState(this.executor.getExecution().name, States.RESTART)
+        } else {
+            executionStateHandler.setExecutionState(this.executor.getExecution().name, States.INTERRUPTED)
+            logger.warn { "Execution ${executor.getExecution().name} was unexpectedly interrupted" }
+        }
+        this.executor.executor.run.set(false)
+    }
+
+    /**
+     * @return all available benchmarks as [KubernetesBenchmark]s
+     */
+    private fun getBenchmarks(): List<KubernetesBenchmark> {
+        return this.benchmarkCRDClient
+            .inNamespace(namespace)
+            .list()
+            .items
+            .map { it.spec.name = it.metadata.name; it }
+            .map { it.spec.path = path; it }
+            .map { it.spec }
+    }
+
+    /**
+     * Get the [BenchmarkExecution] for the next run. Which [BenchmarkExecution]
+     * is selected for the next execution depends on three criteria:
+     *
+     * 1. Only executions are considered for which a matching benchmark is available on the cluster.
+     * 2. The status of the execution must be [States.PENDING] or [States.RESTART].
+     * 3. Of the remaining executions, those with status [States.RESTART] are preferred;
+     * if there is still more than one candidate, the oldest execution is chosen.
+     *
+     * @return the next execution, or null if there is none.
+     */
+    private fun getNextExecution(): BenchmarkExecution? {
+        val availableBenchmarkNames = getBenchmarks()
+            .map { it.name }
+
+        return executionCRDClient
+            .inNamespace(namespace)
+            .list()
+            .items
+            .asSequence()
+            .map { it.spec.name = it.metadata.name; it }
+            .filter {
+                it.status.executionState == States.PENDING.value ||
+                        it.status.executionState == States.RESTART.value
+            }
+            .filter { availableBenchmarkNames.contains(it.spec.benchmark) }
+            .sortedWith(stateComparator().thenBy { it.metadata.creationTimestamp })
+            .map { it.spec }
+            .firstOrNull()
+    }
+
+    /**
+     * Simple comparator which can be used to order a list of [ExecutionCRD] such that executions with
+     * status [States.RESTART] are before all other executions.
+     */
+    private fun stateComparator() = Comparator<ExecutionCRD> { a, b ->
+        val aRestart = a.status.executionState == States.RESTART.value
+        val bRestart = b.status.executionState == States.RESTART.value
+        when {
+            aRestart == bRestart -> 0
+            aRestart -> -1
+            else -> 1
+        }
+    }
+
+    fun isExecutionRunning(executionName: String): Boolean {
+        if (!::executor.isInitialized) return false
+        return this.executor.getExecution().name == executionName
+    }
+
+    private fun setAdditionalLabels(
+        labelValue: String,
+        labelName: String,
+        resources: List<String>,
+        execution: BenchmarkExecution
+    ) {
+        val additionalConfigOverrides = mutableListOf<ConfigurationOverride>()
+        resources.forEach {
+            run {
+                val configurationOverride = ConfigurationOverride()
+                configurationOverride.patcher = PatcherDefinition()
+                configurationOverride.patcher.type = "LabelPatcher"
+                configurationOverride.patcher.properties = mutableMapOf("variableName" to labelName)
+                configurationOverride.patcher.resource = it
+                configurationOverride.value = labelValue
+                additionalConfigOverrides.add(configurationOverride)
+            }
+        }
+        execution.configOverrides.addAll(additionalConfigOverrides)
+    }
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/TheodoliteOperator.kt b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/TheodoliteOperator.kt
new file mode 100644
index 0000000000000000000000000000000000000000..60e238c27877c52a55fba307b036f2d498a1f76a
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/execution/operator/TheodoliteOperator.kt
@@ -0,0 +1,111 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.client.DefaultKubernetesClient
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.dsl.MixedOperation
+import io.fabric8.kubernetes.client.dsl.Resource
+import io.fabric8.kubernetes.internal.KubernetesDeserializer
+import mu.KotlinLogging
+import theodolite.model.crd.BenchmarkCRD
+import theodolite.model.crd.BenchmarkExecutionList
+import theodolite.model.crd.ExecutionCRD
+import theodolite.model.crd.KubernetesBenchmarkList
+
+
+private const val DEFAULT_NAMESPACE = "default"
+private const val EXECUTION_SINGULAR = "execution"
+private const val BENCHMARK_SINGULAR = "benchmark"
+private const val API_VERSION = "v1"
+private const val RESYNC_PERIOD = 10 * 60 * 1000.toLong()
+private const val GROUP = "theodolite.com"
+private val logger = KotlinLogging.logger {}
+
+/**
+ * Implementation of the Operator pattern for K8s.
+ *
+ * **See Also:** [Kubernetes Operator Pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/)
+ */
+class TheodoliteOperator {
+    private val namespace = System.getenv("NAMESPACE") ?: DEFAULT_NAMESPACE
+    val client: NamespacedKubernetesClient = DefaultKubernetesClient().inNamespace(namespace)
+
+
+    fun start() {
+        LeaderElector(
+            client = client,
+            name = "theodolite-operator"
+        )
+            .getLeadership(::startOperator)
+    }
+
+    /**
+     * Start the operator.
+     */
+    private fun startOperator() {
+        logger.info { "Using $namespace as namespace." }
+        client.use {
+            KubernetesDeserializer.registerCustomKind(
+                "$GROUP/$API_VERSION",
+                EXECUTION_SINGULAR,
+                ExecutionCRD::class.java
+            )
+
+            KubernetesDeserializer.registerCustomKind(
+                "$GROUP/$API_VERSION",
+                BENCHMARK_SINGULAR,
+                BenchmarkCRD::class.java
+            )
+
+            val executionCRDClient: MixedOperation<
+                    ExecutionCRD,
+                    BenchmarkExecutionList,
+                    Resource<ExecutionCRD>>
+                = client.customResources(
+                    ExecutionCRD::class.java,
+                    BenchmarkExecutionList::class.java
+            )
+
+            val benchmarkCRDClient: MixedOperation<
+                    BenchmarkCRD,
+                    KubernetesBenchmarkList,
+                    Resource<BenchmarkCRD>>
+                = client.customResources(
+                    BenchmarkCRD::class.java,
+                    KubernetesBenchmarkList::class.java
+            )
+
+            val executionStateHandler = ExecutionStateHandler(
+                client = client)
+
+            val appResource = System.getenv("THEODOLITE_APP_RESOURCES") ?: "./config"
+            val controller =
+                TheodoliteController(
+                    namespace = client.namespace,
+                    path = appResource,
+                    benchmarkCRDClient = benchmarkCRDClient,
+                    executionCRDClient = executionCRDClient,
+                    executionStateHandler = executionStateHandler)
+
+            val informerFactory = client.informers()
+            val informerExecution = informerFactory.sharedIndexInformerForCustomResource(
+                ExecutionCRD::class.java,
+                BenchmarkExecutionList::class.java,
+                RESYNC_PERIOD
+            )
+
+            informerExecution.addEventHandler(ExecutionHandler(
+                controller = controller,
+                stateHandler = executionStateHandler))
+
+            ClusterSetup(
+                executionCRDClient = executionCRDClient,
+                benchmarkCRDClient = benchmarkCRDClient,
+                client = client
+            ).clearClusterState()
+
+            informerFactory.startAllRegisteredInformers()
+            controller.run()
+
+        }
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/k8s/CustomResourceWrapper.kt b/theodolite-quarkus/src/main/kotlin/theodolite/k8s/CustomResourceWrapper.kt
new file mode 100644
index 0000000000000000000000000000000000000000..31a95be04e3290e0797dca5c588394ea36279b0c
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/k8s/CustomResourceWrapper.kt
@@ -0,0 +1,49 @@
+package theodolite.k8s
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext
+import mu.KotlinLogging
+
+private val logger = KotlinLogging.logger {}
+
+class CustomResourceWrapper(val crAsMap: Map<String, String>, private val context: CustomResourceDefinitionContext) : KubernetesResource {
+    /**
+     * Deploy this custom resource object.
+     *
+     * @param client a namespaced Kubernetes client which is used to deploy the CR object.
+     *
+     * @throws java.io.IOException if the resource could not be deployed.
+     */
+    fun deploy(client: NamespacedKubernetesClient) {
+        client.customResource(this.context)
+            .createOrReplace(client.configuration.namespace, this.crAsMap as Map<String, Any>)
+    }
+
+    /**
+     * Delete this custom resource object.
+     *
+     * @param client a namespaced Kubernetes client which is used to delete the CR object.
+     */
+    fun delete(client: NamespacedKubernetesClient) {
+        try {
+            client.customResource(this.context)
+                .delete(client.configuration.namespace, this.getName())
+        } catch (e: Exception) {
+            logger.warn { "Could not delete custom resource." }
+        }
+    }
+
+    /**
+     * @throws NullPointerException if name or metadata is null
+     */
+    fun getName(): String {
+        val metadataAsMap = this.crAsMap["metadata"]!! as Map<String, String>
+        return metadataAsMap["name"]!!
+    }
+
+    fun getLabels(): Map<String, String>{
+        val metadataAsMap = this.crAsMap["metadata"]!! as Map<String, String>
+        return metadataAsMap["labels"]!! as Map<String, String>
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/k8s/K8sContextFactory.kt b/theodolite-quarkus/src/main/kotlin/theodolite/k8s/K8sContextFactory.kt
new file mode 100644
index 0000000000000000000000000000000000000000..7eb209bfbab02bb94d34c985aa308173e509d4e4
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/k8s/K8sContextFactory.kt
@@ -0,0 +1,32 @@
+package theodolite.k8s
+
+import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext
+
+/**
+ * Factory for CustomResourceDefinitionContext
+ *
+ * @see CustomResourceDefinitionContext
+ */
+class K8sContextFactory {
+
+    /**
+     * Create a CustomResourceDefinitionContext.
+     *
+     * @param api The K8s API version
+     * @param scope The scope of the CRD
+     * @param group The group of the CRD
+     * @param plural The plural name (kind) of the CRD
+     *
+     * @return a new CustomResourceDefinitionContext
+     *
+     * @see CustomResourceDefinitionContext
+     */
+    fun create(api: String, scope: String, group: String, plural: String): CustomResourceDefinitionContext {
+        return CustomResourceDefinitionContext.Builder()
+            .withVersion(api)
+            .withScope(scope)
+            .withGroup(group)
+            .withPlural(plural)
+            .build()
+    }
+}
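
As an illustration, the factory can be used to build the context for Theodolite execution resources, mirroring the call in K8sResourceLoader further below:

    // Example: context for Theodolite 'executions' custom resources.
    val executionContext = K8sContextFactory().create(
        api = "v1",
        scope = "Namespaced",
        group = "theodolite.com",
        plural = "executions"
    )
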
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/k8s/K8sManager.kt b/theodolite-quarkus/src/main/kotlin/theodolite/k8s/K8sManager.kt
new file mode 100644
index 0000000000000000000000000000000000000000..77350868500ffa974ab2b9fadfb8cfd915c8aaf2
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/k8s/K8sManager.kt
@@ -0,0 +1,73 @@
+package theodolite.k8s
+
+import io.fabric8.kubernetes.api.model.ConfigMap
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Service
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import mu.KotlinLogging
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * This class is used to deploy or remove different Kubernetes resources.
+ * Supports: Deployments, Services, ConfigMaps, StatefulSets, and CustomResources.
+ * @param client KubernetesClient used to deploy or remove.
+ */
+class K8sManager(private val client: NamespacedKubernetesClient) {
+
+    /**
+     * Deploys different k8s resources using the client.
+     * @throws IllegalArgumentException if KubernetesResource not supported.
+     */
+    fun deploy(resource: KubernetesResource) {
+        when (resource) {
+            is Deployment ->
+                this.client.apps().deployments().createOrReplace(resource)
+            is Service ->
+                this.client.services().createOrReplace(resource)
+            is ConfigMap ->
+                this.client.configMaps().createOrReplace(resource)
+            is StatefulSet ->
+                this.client.apps().statefulSets().createOrReplace(resource)
+            is CustomResourceWrapper -> resource.deploy(client)
+            else -> throw IllegalArgumentException("Unknown Kubernetes resource.")
+        }
+    }
+
+    /**
+     * Removes different k8s resources using the client.
+     * @throws IllegalArgumentException if KubernetesResource not supported.
+     */
+    fun remove(resource: KubernetesResource) {
+        when (resource) {
+            is Deployment -> {
+                val label = resource.spec.selector.matchLabels["app"]!!
+                this.client.apps().deployments().delete(resource)
+                blockUntilPodsDeleted(label)
+                logger.info { "Deployment '${resource.metadata.name}' deleted." }
+            }
+            is Service ->
+                this.client.services().delete(resource)
+            is ConfigMap ->
+                this.client.configMaps().delete(resource)
+            is StatefulSet -> {
+                val label = resource.spec.selector.matchLabels["app"]!!
+                this.client.apps().statefulSets().delete(resource)
+                blockUntilPodsDeleted(label)
+                logger.info { "StatefulSet '${resource.metadata.name}' deleted." }
+            }
+            is CustomResourceWrapper -> resource.delete(client)
+            else -> throw IllegalArgumentException("Unknown Kubernetes resource.")
+        }
+    }
+
+    private fun blockUntilPodsDeleted(podLabel: String) {
+        while (!this.client.pods().withLabel(podLabel).list().items.isNullOrEmpty()) {
+            logger.info { "Wait for pods with label '$podLabel' to be deleted." }
+            Thread.sleep(1000)
+        }
+    }
+
+}
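
A minimal usage sketch (not part of this change), assuming a reachable cluster and the default namespace: deploying and removing a ConfigMap built with the fabric8 builder API. The resource name and data are made up for illustration.

    import io.fabric8.kubernetes.api.model.ConfigMapBuilder
    import io.fabric8.kubernetes.client.DefaultKubernetesClient

    fun configMapExample() {
        val client = DefaultKubernetesClient().inNamespace("default")
        val manager = K8sManager(client)
        val configMap = ConfigMapBuilder()
            .withNewMetadata().withName("example-config").endMetadata() // hypothetical name
            .addToData("key", "value")
            .build()
        manager.deploy(configMap) // createOrReplace via the client
        manager.remove(configMap) // delete it again
    }
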
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/k8s/K8sResourceLoader.kt b/theodolite-quarkus/src/main/kotlin/theodolite/k8s/K8sResourceLoader.kt
new file mode 100644
index 0000000000000000000000000000000000000000..ab4bef3eaa0d93032ab9edacb510ba1b750e2dd6
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/k8s/K8sResourceLoader.kt
@@ -0,0 +1,153 @@
+package theodolite.k8s
+
+import io.fabric8.kubernetes.api.model.ConfigMap
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Service
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.dsl.base.CustomResourceDefinitionContext
+import mu.KotlinLogging
+import theodolite.util.YamlParser
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * Used to load different Kubernetes resources.
+ * Supports: Deployments, Services, ConfigMaps, StatefulSets, and CustomResources.
+ * @param client KubernetesClient used to load the resources.
+ */
+class K8sResourceLoader(private val client: NamespacedKubernetesClient) {
+
+    /**
+     * Parses a Service from a service yaml
+     * @param path of the yaml file
+     * @return Service from fabric8
+     */
+    private fun loadService(path: String): Service {
+        return loadGenericResource(path) { x: String -> client.services().load(x).get() }
+    }
+
+
+    /**
+     * Parses a CustomResource from a yaml
+     * @param path of the yaml file
+     * @param context specific crd context for this custom resource
+     * @return  CustomResourceWrapper from fabric8
+     */
+   private fun loadCustomResourceWrapper(path: String, context: CustomResourceDefinitionContext): CustomResourceWrapper {
+       return loadGenericResource(path) {
+           CustomResourceWrapper(
+               YamlParser().parse(
+                   path,
+                   HashMap<String, String>()::class.java
+               )!!,
+               context
+           )
+       }
+   }
+
+    private fun loadServiceMonitor(path: String): CustomResourceWrapper {
+        val context = K8sContextFactory().create(
+            api = "v1",
+            scope = "Namespaced",
+            group = "monitoring.coreos.com",
+            plural = "servicemonitors"
+        )
+        return loadCustomResourceWrapper(path, context)
+    }
+
+    private fun loadExecution(path: String): KubernetesResource {
+        val context = K8sContextFactory().create(
+            api = "v1",
+            scope = "Namespaced",
+            group = "theodolite.com",
+            plural = "executions"
+        )
+        return loadCustomResourceWrapper(path, context)
+    }
+
+    private fun loadBenchmark(path: String): KubernetesResource {
+        val context = K8sContextFactory().create(
+            api = "v1",
+            scope = "Namespaced",
+            group = "theodolite.com",
+            plural = "benchmarks"
+        )
+        return loadCustomResourceWrapper(path, context)
+    }
+
+
+    /**
+     * Parses a Deployment from a Deployment yaml
+     * @param path of the yaml file
+     * @return Deployment from fabric8
+     */
+    private fun loadDeployment(path: String): Deployment {
+        return loadGenericResource(path) { x: String -> client.apps().deployments().load(x).get() }
+    }
+
+    /**
+     * Parses a ConfigMap from a ConfigMap yaml
+     * @param path of the yaml file
+     * @return ConfigMap from fabric8
+     */
+    private fun loadConfigmap(path: String): ConfigMap {
+        return loadGenericResource(path) { x: String -> client.configMaps().load(x).get() }
+    }
+
+    /**
+     * Parses a StatefulSet from a StatefulSet yaml
+     * @param path of the yaml file
+     * @return StatefulSet from fabric8
+     */
+    private fun loadStatefulSet(path: String): KubernetesResource {
+        return loadGenericResource(path) { x: String -> client.apps().statefulSets().load(x).get() }
+
+    }
+
+    /**
+     * Generic helper function to load a resource.
+     * @param path of the resource
+     * @param f function that is applied to the resource.
+     * @throws IllegalArgumentException If the resource could not be loaded.
+     */
+    private fun <T> loadGenericResource(path: String, f: (String) -> T): T {
+        var resource: T? = null
+
+        try {
+            resource = f(path)
+        } catch (e: Exception) {
+            logger.warn { "Could not load the resource at path '$path'. The path may be misspelled." }
+            logger.warn { e }
+        }
+
+        if (resource == null) {
+            throw IllegalArgumentException("The Resource at path: $path could not be loaded")
+        }
+        return resource
+    }
+
+    /**
+     * Factory function used to load different k8s resources from a path.
+     * Supported kinds are: Deployment, Service, ServiceMonitor, ConfigMap, StatefulSet, Execution and Benchmark.
+     * @param kind of the resource to be loaded.
+     * @param path of the resource to be loaded.
+     * @throws IllegalArgumentException if the kind is not supported or the resource could not be loaded.
+     */
+    fun loadK8sResource(kind: String, path: String): KubernetesResource {
+        return when (kind) {
+            "Deployment" -> loadDeployment(path)
+            "Service" -> loadService(path)
+            "ServiceMonitor" -> loadServiceMonitor(path)
+            "ConfigMap" -> loadConfigmap(path)
+            "StatefulSet" -> loadStatefulSet(path)
+            "Execution" -> loadExecution(path)
+            "Benchmark" -> loadBenchmark(path)
+            else -> {
+                logger.error { "Unsupported resource kind '$kind'." }
+                throw IllegalArgumentException("Error while loading resource with kind: $kind")
+            }
+        }
+    }
+}
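+
+// Illustrative usage sketch (not part of the loader itself): the file name and the client setup below are
+// assumptions for illustration only; the loader is constructed with a fabric8 Kubernetes client as above.
+//
+//   val loader = K8sResourceLoader(DefaultKubernetesClient().inNamespace("default"))
+//   val deployment = loader.loadK8sResource(kind = "Deployment", path = "uc1-kstreams-deployment.yaml")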
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/k8s/TopicManager.kt b/theodolite-quarkus/src/main/kotlin/theodolite/k8s/TopicManager.kt
new file mode 100644
index 0000000000000000000000000000000000000000..3bbae82d77dc5b01a5827c7ee713bf2566be1bab
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/k8s/TopicManager.kt
@@ -0,0 +1,113 @@
+package theodolite.k8s
+
+import mu.KotlinLogging
+import org.apache.kafka.clients.admin.AdminClient
+import org.apache.kafka.clients.admin.CreateTopicsResult
+import org.apache.kafka.clients.admin.NewTopic
+import org.apache.kafka.common.errors.TopicExistsException
+import java.lang.Thread.sleep
+
+private val logger = KotlinLogging.logger {}
+private const val RETRY_TIME = 2000L
+
+/**
+ * Manages Kafka topic-related tasks such as creating and removing topics.
+ * @param kafkaConfig Kafka configuration as a Map; used to create an AdminClient for each operation.
+ */
+class TopicManager(private val kafkaConfig: Map<String, Any>) {
+
+    /**
+     * Create topics.
+     * @param newTopics Collection of all topics that should be created.
+     */
+    fun createTopics(newTopics: Collection<NewTopic>) {
+        val kafkaAdmin: AdminClient = AdminClient.create(this.kafkaConfig)
+        lateinit var result: CreateTopicsResult
+
+        do {
+            var retryCreation = false
+            try {
+                result = kafkaAdmin.createTopics(newTopics)
+                result.all().get() // wait for the future to be completed
+            } catch (e: Exception) { // TopicExistsException
+                logger.warn(e) { "Error during topic creation." }
+                logger.debug { e } // TODO remove due to attached exception to warn log?
+                logger.info { "Remove existing topics." }
+                delete(newTopics.map { topic -> topic.name() }, kafkaAdmin)
+                logger.info { "Will retry the topic creation in ${RETRY_TIME/1000} seconds." }
+                sleep(RETRY_TIME)
+                retryCreation = true
+            }
+        } while (retryCreation)
+
+        logger.info {
+            "Topic creation finished with result: ${
+                result
+                    .values()
+                    .map { it.key + ": " + it.value.isDone }
+                    .joinToString(separator = ",")
+            }"
+        }
+        kafkaAdmin.close()
+    }
+
+    /**
+     * Remove topics.
+     * @param topics Collection of names for the topics to remove.
+     */
+    fun removeTopics(topics: List<String>) {
+        val kafkaAdmin: AdminClient = AdminClient.create(this.kafkaConfig)
+        val currentTopics = kafkaAdmin.listTopics().names().get()
+        delete(currentTopics.filter { matchRegex(it, topics) }, kafkaAdmin)
+        kafkaAdmin.close()
+    }
+
+    /**
+     * Checks whether `existingTopic` is fully matched by at least one of the regular expressions in `topics`.
+     * For example, the topic name `theodolite-input` is matched by the expression `theodolite-.*`.
+     *
+     * @param existingTopic the topic name to check.
+     * @param topics list of regular expressions to match against.
+     * @return true if `existingTopic` matches one of the expressions, else false.
+     */
+    private fun matchRegex(existingTopic: String, topics: List<String>): Boolean {
+        for (t in topics) {
+            val regex = t.toRegex()
+            if (regex.matches(existingTopic)) {
+                return true
+            }
+        }
+        return false
+    }
+
+    private fun delete(topics: List<String>, kafkaAdmin: AdminClient) {
+        var deleted = false
+
+        while (!deleted) {
+            try {
+                val result = kafkaAdmin.deleteTopics(topics)
+                result.all().get() // wait for the future to be completed
+                logger.info {
+                    "Topics deletion finished with result: ${
+                        result.values().map { it.key + ": " + it.value.isDone }
+                            .joinToString(separator = ",")
+                    }"
+                }
+            } catch (e: Exception) {
+                logger.error(e) { "Error while removing topics." }
+                logger.info { "Existing topics are: ${kafkaAdmin.listTopics().names().get()}." }
+            }
+
+            val toDelete = topics.filter { kafkaAdmin.listTopics().names().get().contains(it) }
+
+            if (toDelete.isEmpty()) {
+                deleted = true
+            } else {
+                logger.info { "Deletion of Kafka topics failed, will retry in ${RETRY_TIME/1000} seconds." }
+                sleep(RETRY_TIME)
+            }
+        }
+    }
+
+}
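+
+// Illustrative usage sketch: broker address, topic names and settings below are assumptions for illustration only.
+//
+//   val topicManager = TopicManager(mapOf("bootstrap.servers" to "my-cluster-kafka-bootstrap:9092"))
+//   topicManager.createTopics(listOf(NewTopic("input", 40, 1.toShort())))
+//   topicManager.removeTopics(listOf("input", "output", "theodolite-.*"))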
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/BenchmarkCRD.kt b/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/BenchmarkCRD.kt
new file mode 100644
index 0000000000000000000000000000000000000000..377708af7b7e1a50ae1e33064b2668c364e0685a
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/BenchmarkCRD.kt
@@ -0,0 +1,18 @@
+package theodolite.model.crd
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.HasMetadata
+import io.fabric8.kubernetes.api.model.Namespaced
+import io.fabric8.kubernetes.client.CustomResource
+import io.fabric8.kubernetes.model.annotation.Group
+import io.fabric8.kubernetes.model.annotation.Kind
+import io.fabric8.kubernetes.model.annotation.Version
+import theodolite.benchmark.KubernetesBenchmark
+
+@JsonDeserialize
+@Version("v1")
+@Group("theodolite.com")
+@Kind("benchmark")
+class BenchmarkCRD(
+    var spec: KubernetesBenchmark = KubernetesBenchmark()
+) : CustomResource<KubernetesBenchmark, Void>(), Namespaced, HasMetadata
\ No newline at end of file
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/BenchmarkExecutionList.kt b/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/BenchmarkExecutionList.kt
new file mode 100644
index 0000000000000000000000000000000000000000..2b2dcc07f9c37f1712109e3d092f2db0c139e1c8
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/BenchmarkExecutionList.kt
@@ -0,0 +1,5 @@
+package theodolite.model.crd
+
+import io.fabric8.kubernetes.client.CustomResourceList
+
+class BenchmarkExecutionList : CustomResourceList<ExecutionCRD>()
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/ExecutionCRD.kt b/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/ExecutionCRD.kt
new file mode 100644
index 0000000000000000000000000000000000000000..659621e8c3b1d5308a10d81240575dd3d432b53f
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/ExecutionCRD.kt
@@ -0,0 +1,18 @@
+package theodolite.model.crd
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.Namespaced
+import io.fabric8.kubernetes.client.CustomResource
+import io.fabric8.kubernetes.model.annotation.Group
+import io.fabric8.kubernetes.model.annotation.Kind
+import io.fabric8.kubernetes.model.annotation.Version
+import theodolite.benchmark.BenchmarkExecution
+
+@JsonDeserialize
+@Version("v1")
+@Group("theodolite.com")
+@Kind("execution")
+class ExecutionCRD(
+    var spec: BenchmarkExecution = BenchmarkExecution(),
+    var status: ExecutionStatus = ExecutionStatus()
+) : CustomResource<BenchmarkExecution, ExecutionStatus>(), Namespaced
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/ExecutionStatus.kt b/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/ExecutionStatus.kt
new file mode 100644
index 0000000000000000000000000000000000000000..51b76fcee8fb35c83dca407691833dbb235b29c5
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/ExecutionStatus.kt
@@ -0,0 +1,11 @@
+package theodolite.model.crd
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Namespaced
+
+@JsonDeserialize
+class ExecutionStatus(): KubernetesResource, Namespaced {
+    var executionState: String = ""
+    var executionDuration: String = "-"
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/KubernetesBenchmarkList.kt b/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/KubernetesBenchmarkList.kt
new file mode 100644
index 0000000000000000000000000000000000000000..8ad0a493d948bf5f78741052100766dcf6e316ec
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/KubernetesBenchmarkList.kt
@@ -0,0 +1,5 @@
+package theodolite.model.crd
+
+import io.fabric8.kubernetes.client.CustomResourceList
+
+class KubernetesBenchmarkList : CustomResourceList<BenchmarkCRD>()
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/States.kt b/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/States.kt
new file mode 100644
index 0000000000000000000000000000000000000000..79af297915b6703b209acb0c13913482e54db2be
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/model/crd/States.kt
@@ -0,0 +1,11 @@
+package theodolite.model.crd
+
+enum class States(val value: String) {
+    RUNNING("RUNNING"),
+    PENDING("PENDING"),
+    FAILURE("FAILURE"),
+    FINISHED("FINISHED"),
+    RESTART("RESTART"),
+    INTERRUPTED("INTERRUPTED"),
+    NO_STATE("NoState")
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/patcher/AbstractPatcher.kt b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/AbstractPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..df80e9cbd2503685a7dbed35db5319920dfc42cb
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/AbstractPatcher.kt
@@ -0,0 +1,24 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+
+/**
+ * A Patcher is able to modify values of a Kubernetes resource, see [Patcher].
+ *
+ * An AbstractPatcher is created with up to three parameters.
+ *
+ * @param k8sResource The Kubernetes resource to be patched.
+ * @param container *(optional)* The name of the container to be patched
+ * @param variableName *(optional)* The variable name to be patched
+ *
+ *
+ * **For example**, to patch the load dimension of a load generator, the patcher should be created as follows:
+ *
+ * k8sResource: `uc-1-workload-generator.yaml`
+ * container: `workload`
+ * variableName: `NUM_SENSORS`
+ *
+ */
+abstract class AbstractPatcher(
+    k8sResource: KubernetesResource
+) : Patcher
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/patcher/EnvVarPatcher.kt b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/EnvVarPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..416aec74a3af9b74594f5e6cd018682bf91cbf63
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/EnvVarPatcher.kt
@@ -0,0 +1,60 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.Container
+import io.fabric8.kubernetes.api.model.EnvVar
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+
+/**
+ * The EnvVarPatcher modifies the value of an environment variable of a container in a Kubernetes deployment.
+ *
+ * @property k8sResource Kubernetes resource to be patched.
+ * @property container Container to be patched.
+ * @property variableName Name of the environment variable to be patched.
+ */
+class EnvVarPatcher(
+    private val k8sResource: KubernetesResource,
+    private val container: String,
+    private val variableName: String
+) : AbstractPatcher(k8sResource) {
+
+    override fun <String> patch(value: String) {
+        if (k8sResource is Deployment) {
+            this.setEnv(
+                k8sResource, this.container,
+                mapOf(this.variableName to value) as Map<kotlin.String, kotlin.String>
+            )
+        }
+    }
+
+    /**
+     * Sets the environment variables of a container; creates a new variable if it does not exist yet.
+     * @param container the container to be patched
+     * @param map map of environment variable names to values
+     */
+    private fun setContainerEnv(container: Container, map: Map<String, String>) {
+        map.forEach { (k, v) ->
+            // filter for matching name and set value
+            val x = container.env.filter { envVar -> envVar.name == k }
+
+            if (x.isEmpty()) {
+                val newVar = EnvVar()
+                newVar.name = k
+                newVar.value = v
+                container.env.add(newVar)
+            } else {
+                x.forEach {
+                    it.value = v
+                }
+            }
+        }
+    }
+
+    /**
+     * Sets the environment variables for the container with the given name in the deployment.
+     */
+    private fun setEnv(workloadDeployment: Deployment, containerName: String, map: Map<String, String>) {
+        workloadDeployment.spec.template.spec.containers.filter { it.name == containerName }
+            .forEach { setContainerEnv(it, map) }
+    }
+}
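+
+// Illustrative usage sketch: container name, variable name and value are assumptions for illustration only;
+// `deployment` is assumed to be a fabric8 Deployment loaded beforehand.
+//
+//   val patcher = EnvVarPatcher(k8sResource = deployment, container = "workload", variableName = "NUM_SENSORS")
+//   patcher.patch("25000")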
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/patcher/ImagePatcher.kt b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/ImagePatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..8f6753372076c119324dc962112928253633b6b0
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/ImagePatcher.kt
@@ -0,0 +1,27 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+
+/**
+ * The ImagePatcher changes the image of a container in a deployment or stateful set.
+ *
+ * @param k8sResource Kubernetes resource to be patched.
+ * @param container Container to be patched.
+ */
+class ImagePatcher(private val k8sResource: KubernetesResource, private val container: String) :
+    AbstractPatcher(k8sResource) {
+
+    override fun <String> patch(imagePath: String) {
+        if (k8sResource is Deployment) {
+            k8sResource.spec.template.spec.containers.filter { it.name == container }.forEach {
+                it.image = imagePath as kotlin.String
+            }
+        } else if (k8sResource is StatefulSet) {
+            k8sResource.spec.template.spec.containers.filter { it.name == container }.forEach {
+                it.image = imagePath as kotlin.String
+            }
+        }
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/patcher/LabelPatcher.kt b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/LabelPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..4fa7fc893cfaf864d935074ff50af8d61f7aac76
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/LabelPatcher.kt
@@ -0,0 +1,49 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.ConfigMap
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Service
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+import io.fabric8.kubernetes.client.CustomResource
+
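+/**
+ * The LabelPatcher adds or overrides a metadata label of a Kubernetes resource
+ * (Deployment, StatefulSet, Service, ConfigMap or CustomResource).
+ *
+ * @param k8sResource Kubernetes resource to be patched.
+ * @param variableName Key of the label; the patched value is used as the label value.
+ */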
+class LabelPatcher(private val k8sResource: KubernetesResource, val variableName: String) :
+    AbstractPatcher(k8sResource) {
+
+    override fun <String> patch(labelValue: String) {
+        if(labelValue is kotlin.String){
+            when(k8sResource){
+                is Deployment -> {
+                    if (k8sResource.metadata.labels == null){
+                        k8sResource.metadata.labels = mutableMapOf()
+                    }
+                    k8sResource.metadata.labels[this.variableName] = labelValue
+                }
+                is StatefulSet -> {
+                    if (k8sResource.metadata.labels == null){
+                        k8sResource.metadata.labels = mutableMapOf()
+                    }
+                    k8sResource.metadata.labels[this.variableName] = labelValue
+                }
+                is Service -> {
+                    if (k8sResource.metadata.labels == null){
+                        k8sResource.metadata.labels = mutableMapOf()
+                    }
+                    k8sResource.metadata.labels[this.variableName] = labelValue
+                }
+                is ConfigMap -> {
+                    if (k8sResource.metadata.labels == null){
+                        k8sResource.metadata.labels = mutableMapOf()
+                    }
+                    k8sResource.metadata.labels[this.variableName] = labelValue
+                }
+                is CustomResource<*,*> -> {
+                    if (k8sResource.metadata.labels == null){
+                        k8sResource.metadata.labels = mutableMapOf()
+                    }
+                    k8sResource.metadata.labels[this.variableName] = labelValue
+                }
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/patcher/NodeSelectorPatcher.kt b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/NodeSelectorPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..0e8cd553a6c6a9ed6fa2c8cc1b84e4cfebe79d73
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/NodeSelectorPatcher.kt
@@ -0,0 +1,19 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+
+/**
+ * The NodeSelectorPatcher makes it possible to set the node selector of a Kubernetes deployment.
+ *
+ * @param k8sResource Kubernetes resource to be patched.
+ * @param variableName The `label-key` of the node for which the `label-value` is to be patched.
+ */
+class NodeSelectorPatcher(private val k8sResource: KubernetesResource, private val variableName: String) :
+    AbstractPatcher(k8sResource) {
+    override fun <String> patch(value: String) {
+        if (k8sResource is Deployment) {
+            k8sResource.spec.template.spec.nodeSelector = mapOf(variableName to value as kotlin.String)
+        }
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/patcher/NumNestedGroupsLoadGeneratorReplicaPatcher.kt b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/NumNestedGroupsLoadGeneratorReplicaPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..65489a96974ad566fe7cbd88cf6ff7fb49135e1d
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/NumNestedGroupsLoadGeneratorReplicaPatcher.kt
@@ -0,0 +1,22 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import kotlin.math.pow
+
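+/**
+ * Sets the number of load generator replicas based on the patched number of nested groups:
+ * the approximate number of sensors is computed as `numSensors` to the power of the patched value,
+ * and the replica count is the ceiling of that number divided by `loadGenMaxRecords`.
+ */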
+class NumNestedGroupsLoadGeneratorReplicaPatcher(
+    private val k8sResource: KubernetesResource,
+    private val numSensors: String,
+    private val loadGenMaxRecords: String
+    ) :
+    AbstractPatcher(k8sResource) {
+    override fun <String> patch(value: String) {
+        if (k8sResource is Deployment) {
+            if (value is kotlin.String) {
+                val approxNumSensors = numSensors.toDouble().pow(Integer.parseInt(value).toDouble())
+                val loadGenInstances = (approxNumSensors + loadGenMaxRecords.toDouble() - 1) / loadGenMaxRecords.toDouble()
+                this.k8sResource.spec.replicas = loadGenInstances.toInt()
+            }
+        }
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/patcher/NumSensorsLoadGeneratorReplicaPatcher.kt b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/NumSensorsLoadGeneratorReplicaPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..f6a06324e36d7942d3944a492fee263f428376c1
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/NumSensorsLoadGeneratorReplicaPatcher.kt
@@ -0,0 +1,20 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+
+
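+/**
+ * Sets the number of load generator replicas based on the patched number of sensors:
+ * the replica count is the ceiling of the patched value divided by `loadGenMaxRecords`.
+ */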
+class NumSensorsLoadGeneratorReplicaPatcher(
+    private val k8sResource: KubernetesResource,
+    private val loadGenMaxRecords: String
+) :
+    AbstractPatcher(k8sResource) {
+    override fun <String> patch(value: String) {
+        if (k8sResource is Deployment) {
+            if (value is kotlin.String) {
+                val loadGenInstances = (Integer.parseInt(value) + loadGenMaxRecords.toInt() - 1) / loadGenMaxRecords.toInt()
+                this.k8sResource.spec.replicas = loadGenInstances
+            }
+        }
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/patcher/Patcher.kt b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/Patcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..84b886cb4f06b3e667eb8b8aeaa622e1ee54852e
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/Patcher.kt
@@ -0,0 +1,20 @@
+package theodolite.patcher
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+/**
+ * A patcher can be used to modify values of Kubernetes resource.
+ *
+ * @constructor Create empty Patcher
+ */
+@RegisterForReflection
+interface Patcher {
+    /**
+     * The patch method modifies a value in the definition of a
+     * Kubernetes resource.
+     *
+     * @param T The type of value
+     * @param value The value to be used.
+     */
+    fun <T> patch(value: T)
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/patcher/PatcherDefinitionFactory.kt b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/PatcherDefinitionFactory.kt
new file mode 100644
index 0000000000000000000000000000000000000000..d5a6f3821d2688651475625506a78efc6061ab82
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/PatcherDefinitionFactory.kt
@@ -0,0 +1,27 @@
+package theodolite.patcher
+
+import theodolite.util.PatcherDefinition
+import theodolite.util.TypeName
+
+/**
+ * The PatcherDefinitionFactory creates [PatcherDefinition]s.
+ *
+ * @constructor Create empty Patcher definition factory.
+ */
+class PatcherDefinitionFactory {
+    /**
+     * Creates a list of PatcherDefinitions
+     *
+     * @param requiredType indicates the required PatcherDefinitions
+     *     (for example `NumSensors`)
+     * @param patcherTypes list of TypeNames. A TypeName contains a type
+     *     (for example `NumSensors`) and a list of
+     *     PatcherDefinitions, which are related to this type.
+     * @return A list of PatcherDefinitions which corresponds to the
+     *     value of the requiredType.
+     */
+    fun createPatcherDefinition(requiredType: String, patcherTypes: List<TypeName>): List<PatcherDefinition> {
+        return patcherTypes.firstOrNull { type -> type.typeName == requiredType }
+            ?.patchers ?: throw IllegalArgumentException("typeName $requiredType not found.")
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/patcher/PatcherFactory.kt b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/PatcherFactory.kt
new file mode 100644
index 0000000000000000000000000000000000000000..45e50113c964d671962fadc718994a29b2da81f4
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/PatcherFactory.kt
@@ -0,0 +1,84 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import theodolite.util.DeploymentFailedException
+import theodolite.util.InvalidPatcherConfigurationException
+import theodolite.util.PatcherDefinition
+
+/**
+ * The Patcher factory creates [Patcher]s
+ *
+ * @constructor Creates an empty PatcherFactory.
+ */
+class PatcherFactory {
+    /**
+     * Create a patcher based on the given [PatcherDefinition] and
+     * the list of KubernetesResources.
+     *
+     * @param patcherDefinition The [PatcherDefinition] for which a
+     *     [Patcher] should be created.
+     * @param k8sResources List of all available Kubernetes resources.
+     *     This is a list of pairs<String, KubernetesResource>:
+     *     The first element corresponds to the filename in which the resource is defined.
+     *     The second element corresponds to the concrete [KubernetesResource] that should be patched.
+     * @return The created [Patcher].
+     * @throws DeploymentFailedException if the resource referenced by the patcher definition cannot be found.
+     * @throws InvalidPatcherConfigurationException if no patcher can be created for the given definition.
+     */
+    fun createPatcher(
+        patcherDefinition: PatcherDefinition,
+        k8sResources: List<Pair<String, KubernetesResource>>
+    ): Patcher {
+        val resource =
+            k8sResources.filter { it.first == patcherDefinition.resource }
+                .map { resource -> resource.second }
+                .firstOrNull()
+                ?: throw DeploymentFailedException("Could not find resource ${patcherDefinition.resource}")
+
+        return try {
+            when (patcherDefinition.type) {
+                "ReplicaPatcher" -> ReplicaPatcher(
+                    k8sResource = resource
+                )
+                "NumNestedGroupsLoadGeneratorReplicaPatcher" -> NumNestedGroupsLoadGeneratorReplicaPatcher(
+                    k8sResource = resource,
+                    loadGenMaxRecords = patcherDefinition.properties["loadGenMaxRecords"]!!,
+                    numSensors = patcherDefinition.properties["numSensors"]!!
+                )
+                "NumSensorsLoadGeneratorReplicaPatcher" -> NumSensorsLoadGeneratorReplicaPatcher(
+                    k8sResource = resource,
+                    loadGenMaxRecords = patcherDefinition.properties["loadGenMaxRecords"]!!
+                )
+                "EnvVarPatcher" -> EnvVarPatcher(
+                    k8sResource = resource,
+                    container = patcherDefinition.properties["container"]!!,
+                    variableName = patcherDefinition.properties["variableName"]!!
+                )
+                "NodeSelectorPatcher" -> NodeSelectorPatcher(
+                    k8sResource = resource,
+                    variableName = patcherDefinition.properties["variableName"]!!
+                )
+                "ResourceLimitPatcher" -> ResourceLimitPatcher(
+                    k8sResource = resource,
+                    container = patcherDefinition.properties["container"]!!,
+                    limitedResource = patcherDefinition.properties["limitedResource"]!!
+                )
+                "ResourceRequestPatcher" -> ResourceRequestPatcher(
+                    k8sResource = resource,
+                    container = patcherDefinition.properties["container"]!!,
+                    requestedResource = patcherDefinition.properties["requestedResource"]!!
+                )
+                "SchedulerNamePatcher" -> SchedulerNamePatcher(
+                    k8sResource = resource
+                )
+                "LabelPatcher" -> LabelPatcher(
+                    k8sResource = resource,
+                    variableName = patcherDefinition.properties["variableName"]!!
+                )
+                else -> throw InvalidPatcherConfigurationException("Patcher type ${patcherDefinition.type} not found.")
+            }
+        } catch (e: Exception) {
+            throw InvalidPatcherConfigurationException("Could not create patcher with type ${patcherDefinition.type}." +
+                    " Probably a required patcher argument was not specified.")
+        }
+    }
+}
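+
+// Illustrative usage sketch: the patcher definition and resource list below are assumptions for illustration
+// only; in an execution they are parsed from the benchmark and execution definitions.
+//
+//   val patcher = PatcherFactory().createPatcher(
+//       patcherDefinition = definition, // e.g. type = "ReplicaPatcher", resource = "uc1-kstreams-deployment.yaml"
+//       k8sResources = listOf("uc1-kstreams-deployment.yaml" to deployment)
+//   )
+//   patcher.patch("3")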
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/patcher/ReplicaPatcher.kt b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/ReplicaPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..4cc35f2ed74f9e366c266c3f98f1b3d36d4ba1b8
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/ReplicaPatcher.kt
@@ -0,0 +1,19 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+
+/**
+ * The Replica [Patcher] modifies the number of replicas for the given Kubernetes deployment.
+ *
+ * @param k8sResource  Kubernetes resource to be patched.
+ */
+class ReplicaPatcher(private val k8sResource: KubernetesResource) : AbstractPatcher(k8sResource) {
+    override fun <String> patch(value: String) {
+        if (k8sResource is Deployment) {
+            if (value is kotlin.String) {
+                this.k8sResource.spec.replicas = Integer.parseInt(value)
+            }
+        }
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/patcher/ResourceLimitPatcher.kt b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/ResourceLimitPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..1a6fa35a944d00634ec0607b0bff34f4cb9d9b9c
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/ResourceLimitPatcher.kt
@@ -0,0 +1,59 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.Container
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Quantity
+import io.fabric8.kubernetes.api.model.ResourceRequirements
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+
+/**
+ * The resource limit [Patcher] sets resource limits for deployments and stateful sets.
+ *
+ * @param k8sResource Kubernetes resource to be patched.
+ * @param container Container to be patched.
+ * @param limitedResource The resource to be limited (e.g. **cpu or memory**)
+ */
+class ResourceLimitPatcher(
+    private val k8sResource: KubernetesResource,
+    private val container: String,
+    private val limitedResource: String
+) : AbstractPatcher(k8sResource) {
+
+    override fun <String> patch(value: String) {
+        when (k8sResource) {
+            is Deployment -> {
+                k8sResource.spec.template.spec.containers.filter { it.name == container }.forEach {
+                    setLimits(it, value as kotlin.String)
+                }
+            }
+            is StatefulSet -> {
+                k8sResource.spec.template.spec.containers.filter { it.name == container }.forEach {
+                    setLimits(it, value as kotlin.String)
+                }
+            }
+            else -> {
+                throw IllegalArgumentException("ResourceLimitPatcher not applicable for $k8sResource")
+            }
+        }
+    }
+
+    private fun setLimits(container: Container, value: String) {
+        when {
+            container.resources == null -> {
+                val resource = ResourceRequirements()
+                resource.limits = mapOf(limitedResource to Quantity(value))
+                container.resources = resource
+            }
+            container.resources.limits.isEmpty() -> {
+                container.resources.limits = mapOf(limitedResource to Quantity(value))
+            }
+            else -> {
+                val values = mutableMapOf<String, Quantity>()
+                container.resources.limits.forEach { entry -> values[entry.key] = entry.value }
+                values[limitedResource] = Quantity(value)
+                container.resources.limits = values
+            }
+        }
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/patcher/ResourceRequestPatcher.kt b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/ResourceRequestPatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..9bf8c3c72f656d326ca3070cd5843778e5cdff42
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/ResourceRequestPatcher.kt
@@ -0,0 +1,59 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.Container
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.Quantity
+import io.fabric8.kubernetes.api.model.ResourceRequirements
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+
+/**
+ * The resource request [Patcher] sets resource requests for deployments and stateful sets.
+ *
+ * @param k8sResource Kubernetes resource to be patched.
+ * @param container Container to be patched.
+ * @param requestedResource The resource to be requested (e.g. **cpu or memory**)
+ */
+class ResourceRequestPatcher(
+    private val k8sResource: KubernetesResource,
+    private val container: String,
+    private val requestedResource: String
+) : AbstractPatcher(k8sResource) {
+
+    override fun <String> patch(value: String) {
+        when (k8sResource) {
+            is Deployment -> {
+                k8sResource.spec.template.spec.containers.filter { it.name == container }.forEach {
+                    setRequests(it, value as kotlin.String)
+                }
+            }
+            is StatefulSet -> {
+                k8sResource.spec.template.spec.containers.filter { it.name == container }.forEach {
+                    setRequests(it, value as kotlin.String)
+                }
+            }
+            else -> {
+                throw IllegalArgumentException("ResourceRequestPatcher not applicable for $k8sResource")
+            }
+        }
+    }
+
+    private fun setRequests(container: Container, value: String) {
+        when {
+            container.resources == null -> {
+                val resource = ResourceRequirements()
+                resource.requests = mapOf(requestedResource to Quantity(value))
+                container.resources = resource
+            }
+            container.resources.requests.isEmpty() -> {
+                container.resources.requests = mapOf(requestedResource to Quantity(value))
+            }
+            else -> {
+                val values = mutableMapOf<String, Quantity>()
+                container.resources.requests.forEach { entry -> values[entry.key] = entry.value }
+                values[requestedResource] = Quantity(value)
+                container.resources.requests = values
+            }
+        }
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/patcher/SchedulerNamePatcher.kt b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/SchedulerNamePatcher.kt
new file mode 100644
index 0000000000000000000000000000000000000000..348f0c50090a34c91221d3e099c3532375a578da
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/patcher/SchedulerNamePatcher.kt
@@ -0,0 +1,17 @@
+package theodolite.patcher
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.api.model.apps.Deployment
+
+/**
+ * The scheduler name [Patcher] makes it possible to set the scheduler that should
+ * be used to deploy the given deployment.
+ * @param k8sResource Kubernetes resource to be patched.
+ */
+class SchedulerNamePatcher(private val k8sResource: KubernetesResource) : Patcher {
+    override fun <String> patch(value: String) {
+        if (k8sResource is Deployment) {
+            k8sResource.spec.template.spec.schedulerName = value as kotlin.String
+        }
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/strategies/StrategyFactory.kt b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/StrategyFactory.kt
new file mode 100644
index 0000000000000000000000000000000000000000..829370e8ce1c181c1a4cb9fdd8ccf0ecefd48d3d
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/StrategyFactory.kt
@@ -0,0 +1,54 @@
+package theodolite.strategies
+
+import theodolite.execution.BenchmarkExecutor
+import theodolite.strategies.restriction.LowerBoundRestriction
+import theodolite.strategies.restriction.RestrictionStrategy
+import theodolite.strategies.searchstrategy.BinarySearch
+import theodolite.strategies.searchstrategy.FullSearch
+import theodolite.strategies.searchstrategy.LinearSearch
+import theodolite.strategies.searchstrategy.SearchStrategy
+import theodolite.util.Results
+
+/**
+ * Factory for creating [SearchStrategy] and [RestrictionStrategy] strategies.
+ */
+class StrategyFactory {
+
+    /**
+     * Create a [SearchStrategy].
+     *
+     * @param executor The [theodolite.execution.BenchmarkExecutor] that executes individual experiments.
+     * @param searchStrategyString Specifies the [SearchStrategy]. Must be one of the strings 'FullSearch',
+     * 'LinearSearch' or 'BinarySearch'.
+     *
+     * @throws IllegalArgumentException if the [SearchStrategy] was not one of the allowed options.
+     */
+    fun createSearchStrategy(executor: BenchmarkExecutor, searchStrategyString: String): SearchStrategy {
+        return when (searchStrategyString) {
+            "FullSearch" -> FullSearch(executor)
+            "LinearSearch" -> LinearSearch(executor)
+            "BinarySearch" -> BinarySearch(executor)
+            else -> throw IllegalArgumentException("Search Strategy $searchStrategyString not found")
+        }
+    }
+
+    /**
+     * Create a [RestrictionStrategy].
+     *
+     * @param results The [Results] saves the state of the Theodolite benchmark run.
+     * @param restrictionStrings Specifies the list of [RestrictionStrategy] that are used to restrict the amount
+     * of [theodolite.util.Resource] for a fixed LoadDimension. Each entry must equal the string
+     * 'LowerBound'.
+     *
+     * @throws IllegalArgumentException if one of the restriction strings was not one of the allowed options.
+     */
+    fun createRestrictionStrategy(results: Results, restrictionStrings: List<String>): Set<RestrictionStrategy> {
+        return restrictionStrings
+            .map { restriction ->
+                when (restriction) {
+                    "LowerBound" -> LowerBoundRestriction(results)
+                    else -> throw IllegalArgumentException("Restriction Strategy $restriction not found")
+                }
+            }.toSet()
+    }
+}
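+
+// Illustrative usage sketch: `executor` and `results` are assumed to exist; the strategy names correspond
+// to the options accepted by the factory methods above.
+//
+//   val factory = StrategyFactory()
+//   val searchStrategy = factory.createSearchStrategy(executor, "LinearSearch")
+//   val restrictions = factory.createRestrictionStrategy(results, listOf("LowerBound"))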
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/strategies/restriction/LowerBoundRestriction.kt b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/restriction/LowerBoundRestriction.kt
new file mode 100644
index 0000000000000000000000000000000000000000..13bfedfe055f2bd428137f89b2986f3967ec797c
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/restriction/LowerBoundRestriction.kt
@@ -0,0 +1,24 @@
+package theodolite.strategies.restriction
+
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import theodolite.util.Results
+
+/**
+ * The [LowerBoundRestriction] sets the lower bound of the resources to be examined to the value
+ * needed to successfully execute the next smaller load.
+ *
+ * @param results [Results] object used as a basis to restrict the resources.
+ */
+class LowerBoundRestriction(results: Results) : RestrictionStrategy(results) {
+
+    override fun apply(load: LoadDimension, resources: List<Resource>): List<Resource> {
+        val maxLoad: LoadDimension? = this.results.getMaxBenchmarkedLoad(load)
+        var lowerBound: Resource? = this.results.getMinRequiredInstances(maxLoad)
+        if (lowerBound == null) {
+            lowerBound = resources[0]
+        }
+        return resources.filter { x -> x.get() >= lowerBound.get() }
+    }
+
+}
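+
+// Worked example (illustrative): if a previous experiment showed that a smaller load already requires
+// at least 3 instances, then for the current load the resource list [1, 2, 3, 4, 5] is restricted
+// to [3, 4, 5], since fewer instances are assumed not to be sufficient for a larger load.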
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/strategies/restriction/RestrictionStrategy.kt b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/restriction/RestrictionStrategy.kt
new file mode 100644
index 0000000000000000000000000000000000000000..1ab7302d7898daad729b1c94c32d97138b5cdcf4
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/restriction/RestrictionStrategy.kt
@@ -0,0 +1,25 @@
+package theodolite.strategies.restriction
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import theodolite.util.Results
+
+/**
+ * A 'Restriction Strategy' restricts a list of resources based on the current
+ * results of all previously performed benchmarks.
+ *
+ * @param results the [Results] object
+ */
+@RegisterForReflection
+abstract class RestrictionStrategy(val results: Results) {
+    /**
+     * Apply the restriction of the given resource list for the given load based on the results object.
+     *
+     * @param load [LoadDimension] for which a subset of resources are required.
+     * @param resources List of [Resource]s to be restricted.
+     * @return Returns a list containing only elements that have not been filtered out by the
+     * restriction (possibly empty).
+     */
+    abstract fun apply(load: LoadDimension, resources: List<Resource>): List<Resource>
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/BinarySearch.kt b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/BinarySearch.kt
new file mode 100644
index 0000000000000000000000000000000000000000..28e8194c699cd074026c8cb7e6f3ce4ec347023b
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/BinarySearch.kt
@@ -0,0 +1,61 @@
+package theodolite.strategies.searchstrategy
+
+import mu.KotlinLogging
+import theodolite.execution.BenchmarkExecutor
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ *  Binary-search-like implementation for determining the smallest suitable number of instances.
+ *
+ * @param benchmarkExecutor Benchmark executor which runs the individual benchmarks.
+ */
+class BinarySearch(benchmarkExecutor: BenchmarkExecutor) : SearchStrategy(benchmarkExecutor) {
+    override fun findSuitableResource(load: LoadDimension, resources: List<Resource>): Resource? {
+        val result = binarySearch(load, resources, 0, resources.size - 1)
+        if (result == -1) {
+            return null
+        }
+        return resources[result]
+    }
+
+    /**
+     * Apply binary search.
+     *
+     * @param load the load dimension to perform experiments for
+     * @param resources the list in which binary search is performed
+     * @param lower lower bound for binary search (inclusive)
+     * @param upper upper bound for binary search (inclusive)
+     */
+    private fun binarySearch(load: LoadDimension, resources: List<Resource>, lower: Int, upper: Int): Int {
+        if (lower > upper) {
+            throw IllegalArgumentException()
+        }
+        // base case: the remaining search range contains a single element
+        if (lower == upper) {
+            val res = resources[lower]
+            logger.info { "Running experiment with load '${load.get()}' and resources '${res.get()}'" }
+            if (this.benchmarkExecutor.runExperiment(load, resources[lower])) return lower
+            else {
+                if (lower + 1 == resources.size) return -1
+                return lower + 1
+            }
+        } else {
+            // apply binary search for a list with
+            // length > 2 and adjust upper and lower depending on the result for `resources[mid]`
+            val mid = (upper + lower) / 2
+            val res = resources[mid]
+            logger.info { "Running experiment with load '${load.get()}' and resources '${res.get()}'" }
+            if (this.benchmarkExecutor.runExperiment(load, resources[mid])) {
+                if (mid == lower) {
+                    return lower
+                }
+                return binarySearch(load, resources, lower, mid - 1)
+            } else {
+                return binarySearch(load, resources, mid + 1, upper)
+            }
+        }
+    }
+}
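+
+// Worked example (illustrative): for resources [1, 2, 3, 4, 5] and a load that requires at least 4 instances,
+// the search first tests 3 (the middle element), which fails, then recurses on [4, 5], tests 4, which
+// succeeds, and returns the resource with 4 instances.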
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/CompositeStrategy.kt b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/CompositeStrategy.kt
new file mode 100644
index 0000000000000000000000000000000000000000..41cc5c325163ade54469398e815fdb8d95c6e6cd
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/CompositeStrategy.kt
@@ -0,0 +1,30 @@
+package theodolite.strategies.searchstrategy
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+import theodolite.execution.BenchmarkExecutor
+import theodolite.strategies.restriction.RestrictionStrategy
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+/**
+ *  Composite strategy that combines a SearchStrategy and a set of RestrictionStrategy.
+ *
+ * @param searchStrategy the [SearchStrategy] that is executed as part of this [CompositeStrategy].
+ * @param restrictionStrategies the set of [RestrictionStrategy] that are connected conjunctive to restrict the [Resource]
+ * @param benchmarkExecutor Benchmark executor which runs the individual benchmarks.
+ */
+@RegisterForReflection
+class CompositeStrategy(
+    benchmarkExecutor: BenchmarkExecutor,
+    private val searchStrategy: SearchStrategy,
+    val restrictionStrategies: Set<RestrictionStrategy>
+) : SearchStrategy(benchmarkExecutor) {
+
+    override fun findSuitableResource(load: LoadDimension, resources: List<Resource>): Resource? {
+        var restrictedResources = resources.toList()
+        for (strategy in this.restrictionStrategies) {
+            restrictedResources = restrictedResources.intersect(strategy.apply(load, resources)).toList()
+        }
+        return this.searchStrategy.findSuitableResource(load, restrictedResources)
+    }
+}
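+
+// Illustrative wiring sketch: combines a binary search with the lower bound restriction;
+// `executor` and `results` are assumed to exist.
+//
+//   val strategy = CompositeStrategy(
+//       benchmarkExecutor = executor,
+//       searchStrategy = BinarySearch(executor),
+//       restrictionStrategies = setOf(LowerBoundRestriction(results))
+//   )
+//   val suitableResource = strategy.findSuitableResource(load, resources)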
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/FullSearch.kt b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/FullSearch.kt
new file mode 100644
index 0000000000000000000000000000000000000000..cb0dd2d8ab528e42e8290f59f26c8b9b32f384c7
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/FullSearch.kt
@@ -0,0 +1,31 @@
+package theodolite.strategies.searchstrategy
+
+import mu.KotlinLogging
+import theodolite.execution.BenchmarkExecutor
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * [SearchStrategy] that executes an experiment for all provided resources in a linear-search-like fashion, but **without
+ * stopping** once a suitable resource amount is found.
+ *
+ * @see LinearSearch for a SearchStrategy that stops once a suitable resource amount is found.
+ *
+ * @param benchmarkExecutor Benchmark executor which runs the individual benchmarks.
+ */
+class FullSearch(benchmarkExecutor: BenchmarkExecutor) : SearchStrategy(benchmarkExecutor) {
+
+    override fun findSuitableResource(load: LoadDimension, resources: List<Resource>): Resource? {
+        var minimalSuitableResources: Resource? = null
+        for (res in resources) {
+            logger.info { "Running experiment with load '${load.get()}' and resources '${res.get()}'" }
+            val result = this.benchmarkExecutor.runExperiment(load, res)
+            // Record the first (smallest) suitable resource amount, but keep running all remaining experiments.
+            if (result && minimalSuitableResources == null) {
+                minimalSuitableResources = res
+            }
+        }
+        return minimalSuitableResources
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/LinearSearch.kt b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/LinearSearch.kt
new file mode 100644
index 0000000000000000000000000000000000000000..85deaf6fa75437199bfc560404eb5b40bb4a986a
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/LinearSearch.kt
@@ -0,0 +1,25 @@
+package theodolite.strategies.searchstrategy
+
+import mu.KotlinLogging
+import theodolite.execution.BenchmarkExecutor
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ *  Linear-search-like implementation for determining the smallest suitable number of instances.
+ *
+ * @param benchmarkExecutor Benchmark executor which runs the individual benchmarks.
+ */
+class LinearSearch(benchmarkExecutor: BenchmarkExecutor) : SearchStrategy(benchmarkExecutor) {
+
+    override fun findSuitableResource(load: LoadDimension, resources: List<Resource>): Resource? {
+        for (res in resources) {
+
+            logger.info { "Running experiment with load '${load.get()}' and resources '${res.get()}'" }
+            if (this.benchmarkExecutor.runExperiment(load, res)) return res
+        }
+        return null
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/SearchStrategy.kt b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/SearchStrategy.kt
new file mode 100644
index 0000000000000000000000000000000000000000..4e304b010d4d56f6b5fe734a6b977361f93e57a1
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/strategies/searchstrategy/SearchStrategy.kt
@@ -0,0 +1,24 @@
+package theodolite.strategies.searchstrategy
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+import theodolite.execution.BenchmarkExecutor
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+/**
+ *  Base class for the implementation of SearchStrategies. A SearchStrategy determines the smallest suitable number of instances.
+ *
+ * @param benchmarkExecutor Benchmark executor which runs the individual benchmarks.
+ */
+@RegisterForReflection
+abstract class SearchStrategy(val benchmarkExecutor: BenchmarkExecutor) {
+    /**
+     * Find smallest suitable resource from the specified resource list for the given load.
+     *
+     * @param load the [LoadDimension] to be tested.
+     * @param resources List of all possible [Resource]s.
+     *
+     * @return suitable resource for the specified load, or null if no suitable resource exists.
+     */
+    abstract fun findSuitableResource(load: LoadDimension, resources: List<Resource>): Resource?
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/util/Config.kt b/theodolite-quarkus/src/main/kotlin/theodolite/util/Config.kt
new file mode 100644
index 0000000000000000000000000000000000000000..afbf784e9d6d72939615e367b54891ecd95a3608
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/util/Config.kt
@@ -0,0 +1,18 @@
+package theodolite.util
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+import theodolite.strategies.searchstrategy.CompositeStrategy
+
+/**
+ * Config class that represents a configuration of a theodolite run.
+ *
+ * @param loads the [LoadDimension] of the execution
+ * @param resources the [Resource] of the execution
+ * @param compositeStrategy the [CompositeStrategy] of the execution
+ */
+@RegisterForReflection
+data class Config(
+    val loads: List<LoadDimension>,
+    val resources: List<Resource>,
+    val compositeStrategy: CompositeStrategy
+)
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/util/ConfigurationOverride.kt b/theodolite-quarkus/src/main/kotlin/theodolite/util/ConfigurationOverride.kt
new file mode 100644
index 0000000000000000000000000000000000000000..537b44721bb344c2cd7af71d29dc4fa3da5a7a33
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/util/ConfigurationOverride.kt
@@ -0,0 +1,21 @@
+package theodolite.util
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+/**
+ * Representation of a configuration override.
+ */
+@JsonDeserialize
+@RegisterForReflection
+class ConfigurationOverride {
+    /**
+     * Patcher of the configuration override.
+     */
+    lateinit var patcher: PatcherDefinition
+
+    /**
+     * Value of the patched configuration override.
+     */
+    lateinit var value: String
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/util/DeploymentFailedException.kt b/theodolite-quarkus/src/main/kotlin/theodolite/util/DeploymentFailedException.kt
new file mode 100644
index 0000000000000000000000000000000000000000..0e276d7de4e205a75eb309a71a793e70f7565ea4
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/util/DeploymentFailedException.kt
@@ -0,0 +1,5 @@
+package theodolite.util
+
+class DeploymentFailedException(message: String) : Exception(message)
\ No newline at end of file
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/util/IOHandler.kt b/theodolite-quarkus/src/main/kotlin/theodolite/util/IOHandler.kt
new file mode 100644
index 0000000000000000000000000000000000000000..8d379fcf0543257edafd2e45383a02ba0254563d
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/util/IOHandler.kt
@@ -0,0 +1,94 @@
+package theodolite.util
+
+import com.google.gson.GsonBuilder
+import mu.KotlinLogging
+import java.io.File
+import java.io.PrintWriter
+
+private val logger = KotlinLogging.logger {}
+
+/**
+ * The IOHandler handles most common I/O operations within the Theodolite framework
+ */
+class IOHandler {
+
+    /**
+     * The location in which Theodolite stores result and configuration files depends on
+     * the values of the environment variables `RESULTS_FOLDER` and `CREATE_RESULTS_FOLDER`.
+     *
+     * @return the path of the result folder
+     */
+    fun getResultFolderURL(): String {
+        var resultsFolder: String = System.getenv("RESULTS_FOLDER") ?: ""
+        val createResultsFolder = System.getenv("CREATE_RESULTS_FOLDER") ?: "false"
+
+        if (resultsFolder != ""){
+            logger.info { "RESULTS_FOLDER: $resultsFolder" }
+            val directory = File(resultsFolder)
+            if (!directory.exists()) {
+                logger.error { "Folder $resultsFolder does not exist" }
+                if (createResultsFolder.toBoolean()) {
+                    directory.mkdirs()
+                } else {
+                    throw IllegalArgumentException("Result folder not found")
+                }
+            }
+            resultsFolder += "/"
+        }
+        return resultsFolder
+    }
+
+    /**
+     * Read a file as String
+     *
+     * @param fileURL the URL of the file
+     * @return The content of the file as String
+     */
+    fun readFileAsString(fileURL: String): String {
+        return File(fileURL).inputStream().readBytes().toString(Charsets.UTF_8).trim()
+    }
+
+    /**
+     * Creates a JSON string of the given object and stores it in a file.
+     *
+     * @param T class of the object to save
+     * @param objectToSave object which should be saved as file
+     * @param fileURL the URL of the file
+     */
+    fun <T> writeToJSONFile(objectToSave: T, fileURL: String) {
+        val gson = GsonBuilder().enableComplexMapKeySerialization().setPrettyPrinting().create()
+        writeStringToTextFile(fileURL, gson.toJson(objectToSave))
+    }
+
+    /**
+     * Write to CSV file
+     *
+     * @param fileURL the URL of the file
+     * @param data  the data to write in the file, as list of list, each subList corresponds to a row in the CSV file
+     * @param columns columns of the CSV file
+     */
+    fun writeToCSVFile(fileURL: String, data: List<List<String>>, columns: List<String>) {
+        val outputFile = File("$fileURL.csv")
+        PrintWriter(outputFile).use { pw ->
+            pw.println(columns.joinToString(separator=","))
+            data.forEach {
+                pw.println(it.joinToString(separator=","))
+            }
+        }
+        logger.info { "Wrote CSV file: $fileURL to ${outputFile.absolutePath}." }
+    }
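+
+    // Illustrative usage sketch (file name and data are assumptions for illustration only):
+    //   IOHandler().writeToCSVFile(
+    //       fileURL = "exp0_demand",
+    //       data = listOf(listOf("10000", "3"), listOf("20000", "5")),
+    //       columns = listOf("load", "requiredInstances")
+    //   )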
+
+    /**
+     * Write to text file
+     *
+     * @param fileURL the URL of the file
+     * @param data the data to write in the file as String
+     */
+    fun writeStringToTextFile(fileURL: String, data: String) {
+        val outputFile = File(fileURL)
+        outputFile.printWriter().use {
+            it.println(data)
+        }
+        logger.info { "Wrote txt file: $fileURL to ${outputFile.absolutePath}." }
+    }
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/util/InvalidPatcherConfigurationException.kt b/theodolite-quarkus/src/main/kotlin/theodolite/util/InvalidPatcherConfigurationException.kt
new file mode 100644
index 0000000000000000000000000000000000000000..c103ef1f35a1b3ffa56dad50c7cf6c1db51eb57f
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/util/InvalidPatcherConfigurationException.kt
@@ -0,0 +1,5 @@
+package theodolite.util
+
+/**
+ * Signals that a patcher was configured with an invalid or incomplete configuration.
+ */
+class InvalidPatcherConfigurationException(message: String) : Exception(message)
+
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/util/KafkaConfig.kt b/theodolite-quarkus/src/main/kotlin/theodolite/util/KafkaConfig.kt
new file mode 100644
index 0000000000000000000000000000000000000000..4e72ccb0d86749a6538c26556241ac114ef8d9a4
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/util/KafkaConfig.kt
@@ -0,0 +1,69 @@
+package theodolite.util
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.quarkus.runtime.annotations.RegisterForReflection
+import theodolite.util.KafkaConfig.TopicWrapper
+import kotlin.properties.Delegates
+import kotlin.reflect.KProperty
+
+/**
+ * Configuration of Kafka connection.
+ *
+ * @see TopicWrapper
+ */
+@RegisterForReflection
+@JsonDeserialize
+class KafkaConfig {
+    /**
+     * The bootstrap server connection string
+     */
+    lateinit var bootstrapServer: String
+
+    /**
+     * The list of topics
+     */
+    lateinit var topics: List<TopicWrapper>
+
+    /**
+     * Wrapper for a topic definition.
+     */
+    @RegisterForReflection
+    @JsonDeserialize
+    class TopicWrapper {
+        /**
+         * The topic name
+         */
+        lateinit var name: String
+
+        /**
+         * The number of partitions
+         */
+        var numPartitions by Delegates.notNull<Int>()
+
+        /**
+         * The replication factor of this topic
+         */
+        var replicationFactor by Delegates.notNull<Short>()
+
+        /**
+     * If removeOnly is set, this topic is only used to delete all topics that have this topic's name as a prefix.
+         */
+        var removeOnly by DelegatesFalse()
+    }
+}
+
+/**
+ * Property delegate that initializes a Boolean property to false.
+ */
+@RegisterForReflection
+class DelegatesFalse {
+    private var state = false
+    operator fun getValue(thisRef: Any?, property: KProperty<*>): Boolean {
+        return state
+    }
+
+    operator fun setValue(thisRef: Any?, property: KProperty<*>, value: Boolean) {
+        state = value
+    }
+
+}
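A short sketch of how a KafkaConfig could be populated in code; the broker address and topic settings below are only illustrative, in practice these values come from the deserialized benchmark definition:

import theodolite.util.KafkaConfig

fun exampleKafkaConfig(): KafkaConfig {
    val topic = KafkaConfig.TopicWrapper()
    topic.name = "input"
    topic.numPartitions = 40
    topic.replicationFactor = 1.toShort()
    // removeOnly is initialized to false by the DelegatesFalse delegate.

    val config = KafkaConfig()
    config.bootstrapServer = "theodolite-cp-kafka:9092"
    config.topics = listOf(topic)
    return config
}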
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/util/LoadDimension.kt b/theodolite-quarkus/src/main/kotlin/theodolite/util/LoadDimension.kt
new file mode 100644
index 0000000000000000000000000000000000000000..cf26da979b05f0a2bd82289ce371715ea0d67c93
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/util/LoadDimension.kt
@@ -0,0 +1,26 @@
+package theodolite.util
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+/**
+ * Representation of the load dimension for an execution of Theodolite.
+ *
+ * @param number the value of this [LoadDimension]
+ * @param type the list of [PatcherDefinition]s of this [LoadDimension]
+ */
+@RegisterForReflection
+data class LoadDimension(private val number: Int, private val type: List<PatcherDefinition>) {
+    /**
+     * @return the value of this load dimension.
+     */
+    fun get(): Int {
+        return this.number
+    }
+
+    /**
+     * @return the list of [PatcherDefinition]
+     */
+    fun getType(): List<PatcherDefinition> {
+        return this.type
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/util/Parser.kt b/theodolite-quarkus/src/main/kotlin/theodolite/util/Parser.kt
new file mode 100644
index 0000000000000000000000000000000000000000..e435b1cbbf18b9f860ceda69f5f7ec66e64c9375
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/util/Parser.kt
@@ -0,0 +1,16 @@
+package theodolite.util
+
+/**
+ * Interface for parsers.
+ * A parser allows the reading of files and creates a corresponding object from them.
+ */
+interface Parser {
+    /**
+     * Parse a file.
+     *
+     * @param path The path of the file
+     * @param E the class of the target type
+     * @param T the target type the file is parsed into
+     */
+    fun <T> parse(path: String, E: Class<T>): T?
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/util/PatcherDefinition.kt b/theodolite-quarkus/src/main/kotlin/theodolite/util/PatcherDefinition.kt
new file mode 100644
index 0000000000000000000000000000000000000000..6ec0cce36751ec0343d40aa49fefa44f4c7fc918
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/util/PatcherDefinition.kt
@@ -0,0 +1,25 @@
+package theodolite.util
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import com.fasterxml.jackson.databind.annotation.JsonSerialize
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+/**
+ * Definition of the structure of a [theodolite.patcher.AbstractPatcher] which implements the [theodolite.patcher.Patcher] interface.
+ */
+@JsonDeserialize
+@RegisterForReflection
+class PatcherDefinition {
+    /**
+     * The type of the patcher
+     */
+    lateinit var type: String
+
+    /**
+     * The resource which the patcher is applied to
+     */
+    lateinit var resource: String
+
+    @JsonSerialize
+    lateinit var properties: MutableMap<String, String>
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/util/PrometheusResponse.kt b/theodolite-quarkus/src/main/kotlin/theodolite/util/PrometheusResponse.kt
new file mode 100644
index 0000000000000000000000000000000000000000..846577387c425e920da1c2fca1f972c880e1540a
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/util/PrometheusResponse.kt
@@ -0,0 +1,82 @@
+package theodolite.util
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+import java.util.*
+
+/**
+ * This class corresponds to the JSON response format of a Prometheus
+ * [range-query](https://www.prometheus.io/docs/prometheus/latest/querying/api/#range-queries)
+ */
+@RegisterForReflection
+data class PrometheusResponse(
+    /**
+     * Indicates whether the query was successful.
+     */
+    var status: String? = null,
+    /**
+     * The data section of the query result contains the information about the resultType and the values itself.
+     */
+    var data: PromData? = null
+) {
+    /**
+     * Return the data of the PrometheusResponse as [List] of [List]s of [String]s
+     * The format of the returned list is: `[[ group, timestamp, value ], [ group, timestamp, value ], ... ]`
+     */
+    fun getResultAsList(): List<List<String>> {
+        val group = data?.result?.get(0)?.metric?.group.toString()
+        val values = data?.result?.get(0)?.values
+        val result = mutableListOf<List<String>>()
+
+        if (values != null) {
+            for (value in values) {
+                val valueList = value as List<*>
+                val timestamp = (valueList[0] as Double).toLong().toString()
+                val sampleValue = valueList[1].toString()
+                result.add(listOf(group, timestamp, sampleValue))
+            }
+        }
+        return Collections.unmodifiableList(result)
+    }
+}
+
+/**
+ * Description of Prometheus data.
+ *
+ * Based on [PromResult]
+ */
+@RegisterForReflection
+data class PromData(
+    /**
+     * Type of the result, either "matrix" | "vector" | "scalar" | "string"
+     */
+    var resultType: String? = null,
+    /**
+     * Result of the range-query. In the case of range-query this corresponds to the [range-vectors result format](https://www.prometheus.io/docs/prometheus/latest/querying/api/#range-vectors)
+     */
+    var result: List<PromResult>? = null
+)
+
+/**
+ * PromResult corresponds to the [range-vectors result format](https://www.prometheus.io/docs/prometheus/latest/querying/api/#range-vectors)
+ */
+@RegisterForReflection
+data class PromResult(
+    /**
+     * Label of the metric
+     */
+    var metric: PromMetric? = null,
+    /**
+     *  Values of the metric (e.g. [ [ <unix_time>, "<sample_value>" ], ... ])
+     */
+    var values: List<Any>? = null
+)
+
+/**
+ * Corresponds to the metric field in the range-vector result format of a Prometheus range-query response.
+ */
+@RegisterForReflection
+data class PromMetric(
+    var group: String? = null
+)
+
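To connect the response model with the CSV export above, a hedged sketch that writes a deserialized PrometheusResponse to a CSV file via IOHandler; the function name and file path are assumptions for illustration:

import theodolite.util.IOHandler
import theodolite.util.PrometheusResponse

// `response` is assumed to be a deserialized Prometheus range-query result.
fun exportResponse(response: PrometheusResponse, fileURL: String) {
    IOHandler().writeToCSVFile(
        fileURL = fileURL,
        data = response.getResultAsList(), // [[group, timestamp, value], ...]
        columns = listOf("group", "timestamp", "value")
    )
}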
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/util/Resource.kt b/theodolite-quarkus/src/main/kotlin/theodolite/util/Resource.kt
new file mode 100644
index 0000000000000000000000000000000000000000..1d6410aa4288e19817e3ba48bfd1bc0d85d006a2
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/util/Resource.kt
@@ -0,0 +1,24 @@
+package theodolite.util
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+/**
+ * Representation of the resources for an execution of Theodolite.
+ *
+ * @param number the value of this [Resource]
+ * @param type the list of [PatcherDefinition]s of this [Resource]
+ */
+@RegisterForReflection
+data class Resource(private val number: Int, private val type: List<PatcherDefinition>) {
+
+    /**
+     * @return the value of this resource.
+     */
+    fun get(): Int {
+        return this.number
+    }
+
+    /**
+     * @return the list of [PatcherDefinition]
+     */
+    fun getType(): List<PatcherDefinition> {
+        return this.type
+    }
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/util/Results.kt b/theodolite-quarkus/src/main/kotlin/theodolite/util/Results.kt
new file mode 100644
index 0000000000000000000000000000000000000000..60641ea0248435de53aaaaf362da7be995b391c5
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/util/Results.kt
@@ -0,0 +1,86 @@
+package theodolite.util
+
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+/**
+ * Central class that saves the state of an execution of Theodolite. For an execution, it is used to save the results of
+ * individual experiments. Further, it is used by the [theodolite.strategies.restriction.RestrictionStrategy] to
+ * restrict the resources to be examined for a given load.
+ */
+@RegisterForReflection
+class Results {
+    private val results: MutableMap<Pair<LoadDimension, Resource>, Boolean> = mutableMapOf()
+
+    /**
+     * Set the result for an experiment.
+     *
+     * @param experiment A pair that identifies the experiment by the [LoadDimension] and [Resource].
+     * @param successful the result of the experiment: true if it was successful, false otherwise.
+     */
+    fun setResult(experiment: Pair<LoadDimension, Resource>, successful: Boolean) {
+        this.results[experiment] = successful
+    }
+
+    /**
+     * Get the result for an experiment.
+     *
+     * @param experiment A pair that identifies the experiment by the [LoadDimension] and [Resource].
+     * @return true if the experiment was successful and false otherwise. If the result has not been reported so far,
+     * null is returned.
+     *
+     * @see Resource
+     */
+    fun getResult(experiment: Pair<LoadDimension, Resource>): Boolean? {
+        return this.results[experiment]
+    }
+
+    /**
+     * Get the smallest suitable number of instances for a specified [LoadDimension].
+     *
+     * @param load the [LoadDimension]
+     *
+     * @return the smallest suitable number of resources. If no successful experiment exists for the given load yet,
+     * a [Resource] with the constant Int.MAX_VALUE as value is returned.
+     * If no experiments have been marked as either successful or unsuccessful
+     * yet, a [Resource] with the constant value Int.MIN_VALUE is returned.
+     */
+    fun getMinRequiredInstances(load: LoadDimension?): Resource? {
+        if (this.results.isEmpty()) {
+            return Resource(Int.MIN_VALUE, emptyList())
+        }
+
+        var minRequiredInstances: Resource? = Resource(Int.MAX_VALUE, emptyList())
+        for (experiment in results) {
+            // Get all successful experiments for requested load
+            if (experiment.key.first == load && experiment.value) {
+                if (minRequiredInstances == null || experiment.key.second.get() < minRequiredInstances.get()) {
+                    // Found new smallest resources
+                    minRequiredInstances = experiment.key.second
+                }
+            }
+        }
+        return minRequiredInstances
+    }
+
+    /**
+     * Get the largest [LoadDimension] that has been reported, successfully or unsuccessfully, so far and that is not
+     * larger than the given [LoadDimension].
+     *
+     * @param load the [LoadDimension]
+     *
+     * @return the largest [LoadDimension] or null, if there is none for this [LoadDimension]
+     */
+    fun getMaxBenchmarkedLoad(load: LoadDimension): LoadDimension? {
+        var maxBenchmarkedLoad: LoadDimension? = null
+        for (experiment in results) {
+            if (experiment.key.first.get() <= load.get()) {
+                if (maxBenchmarkedLoad == null) {
+                    maxBenchmarkedLoad = experiment.key.first
+                } else if (maxBenchmarkedLoad.get() < experiment.key.first.get()) {
+                    maxBenchmarkedLoad = experiment.key.first
+                }
+            }
+        }
+        return maxBenchmarkedLoad
+    }
+}
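A small, self-contained sketch of the Results bookkeeping; the load and resource values are made up:

import theodolite.util.LoadDimension
import theodolite.util.Resource
import theodolite.util.Results

fun main() {
    val results = Results()
    val load = LoadDimension(50000, emptyList())

    // Two instances were not sufficient for this load, three instances were.
    results.setResult(Pair(load, Resource(2, emptyList())), false)
    results.setResult(Pair(load, Resource(3, emptyList())), true)

    // Prints 3, the smallest suitable number of instances for this load.
    println(results.getMinRequiredInstances(load)?.get())

    // Prints 50000, the largest load benchmarked so far that is not larger than 60000.
    println(results.getMaxBenchmarkedLoad(LoadDimension(60000, emptyList()))?.get())
}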
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/util/TypeName.kt b/theodolite-quarkus/src/main/kotlin/theodolite/util/TypeName.kt
new file mode 100644
index 0000000000000000000000000000000000000000..f20fc7c9ce6757be75d9317e76c23a68b09914bd
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/util/TypeName.kt
@@ -0,0 +1,14 @@
+package theodolite.util
+
+import com.fasterxml.jackson.databind.annotation.JsonDeserialize
+import io.quarkus.runtime.annotations.RegisterForReflection
+
+/**
+ * The TypeName encapsulates a list of [PatcherDefinition] along with a typeName that specifies for what the [PatcherDefinition] should be used.
+ */
+@RegisterForReflection
+@JsonDeserialize
+class TypeName {
+    lateinit var typeName: String
+    lateinit var patchers: List<PatcherDefinition>
+}
diff --git a/theodolite-quarkus/src/main/kotlin/theodolite/util/YamlParser.kt b/theodolite-quarkus/src/main/kotlin/theodolite/util/YamlParser.kt
new file mode 100644
index 0000000000000000000000000000000000000000..ce69894e4145372aef07286ae315d11631a4df3f
--- /dev/null
+++ b/theodolite-quarkus/src/main/kotlin/theodolite/util/YamlParser.kt
@@ -0,0 +1,18 @@
+package theodolite.util
+
+import org.yaml.snakeyaml.Yaml
+import org.yaml.snakeyaml.constructor.Constructor
+import java.io.File
+import java.io.FileInputStream
+import java.io.InputStream
+
+/**
+ * The YamlParser parses a YAML file into an object of a given class.
+ */
+class YamlParser : Parser {
+    override fun <T> parse(path: String, E: Class<T>): T? {
+        val parser = Yaml(Constructor(E))
+        // Close the stream after parsing to avoid leaking the file handle.
+        return FileInputStream(File(path)).use { input: InputStream -> parser.loadAs(input, E) }
+    }
+}
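A hedged usage sketch for the YamlParser: parsing a YAML file into one of the framework's configuration classes; the file path is only an example and assumes the file matches the KafkaConfig structure:

import theodolite.util.KafkaConfig
import theodolite.util.YamlParser

fun main() {
    val parser = YamlParser()
    val config: KafkaConfig? = parser.parse("./config/kafka-config.yaml", KafkaConfig::class.java)
    println(config?.bootstrapServer)
}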
diff --git a/theodolite-quarkus/src/main/resources/application.properties b/theodolite-quarkus/src/main/resources/application.properties
new file mode 100644
index 0000000000000000000000000000000000000000..42647e2391706286602945cf2be7baa96857ba19
--- /dev/null
+++ b/theodolite-quarkus/src/main/resources/application.properties
@@ -0,0 +1,6 @@
+quarkus.native.additional-build-args=\
+  --initialize-at-run-time=io.fabric8.kubernetes.client.internal.CertUtils,\
+  --initialize-at-run-time=io.fabric8.kubernetes.client.dsl.internal.uploadable.PodUpload,\
+  --initialize-at-run-time=io.fabric8.kubernetes.client.dsl.internal.core.v1.PodOperationsImpl$1,\
+  --initialize-at-run-time=io.fabric8.kubernetes.client.dsl.internal.core.v1.PodOperationsImpl$3,\
+  --report-unsupported-elements-at-runtime
diff --git a/theodolite-quarkus/src/main/resources/operator/example-execution-k8s-resource.yaml b/theodolite-quarkus/src/main/resources/operator/example-execution-k8s-resource.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b81bbcd442834136283dc080f5f6a79bbc1cd415
--- /dev/null
+++ b/theodolite-quarkus/src/main/resources/operator/example-execution-k8s-resource.yaml
@@ -0,0 +1,29 @@
+apiVersion: theodolite.com/v1
+kind: execution
+metadata:
+  name: theodolite-example-execution
+spec:  
+  benchmark: uc1-kstreams
+  load:  
+    loadType: "NumSensors"
+    loadValues:
+      - 50000 
+  resources:
+    resourceType: "Instances"
+    resourceValues:
+      - 1
+  slos:
+    - sloType: "lag trend"
+      threshold: 1000
+      prometheusUrl: "http://localhost:32656"
+      externalSloUrl: "http://localhost:80/evaluate-slope"
+      offset: 0
+      warmup: 0
+  execution:
+    strategy: "LinearSearch"
+    duration: 60
+    repetitions: 1
+    loadGenerationDelay: 30 # in seconds
+    restrictions:
+      - "LowerBound"
+  configOverrides: []
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/CompositeStrategyTest.kt b/theodolite-quarkus/src/test/kotlin/theodolite/CompositeStrategyTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..49131352cfe517a382ddd7aa1be09d3fbe317466
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/CompositeStrategyTest.kt
@@ -0,0 +1,117 @@
+package theodolite
+
+import io.quarkus.test.junit.QuarkusTest
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Test
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.strategies.restriction.LowerBoundRestriction
+import theodolite.strategies.searchstrategy.BinarySearch
+import theodolite.strategies.searchstrategy.CompositeStrategy
+import theodolite.strategies.searchstrategy.LinearSearch
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import theodolite.util.Results
+
+@QuarkusTest
+class CompositeStrategyTest {
+
+    @Test
+    fun testEnd2EndLinearSearch() {
+        val mockResults = arrayOf(
+            arrayOf(true, true, true, true, true, true, true),
+            arrayOf(false, false, true, true, true, true, true),
+            arrayOf(false, false, true, true, true, true, true),
+            arrayOf(false, false, false, true, true, true, true),
+            arrayOf(false, false, false, false, true, true, true),
+            arrayOf(false, false, false, false, false, false, true),
+            arrayOf(false, false, false, false, false, false, false)
+        )
+        val mockLoads: List<LoadDimension> = (0..6).map { number -> LoadDimension(number, emptyList()) }
+        val mockResources: List<Resource> = (0..6).map { number -> Resource(number, emptyList()) }
+        val results = Results()
+        val benchmark = TestBenchmark()
+        val sloChecker: BenchmarkExecution.Slo = BenchmarkExecution.Slo()
+        val benchmarkExecutor = TestBenchmarkExecutorImpl(mockResults, benchmark, results, sloChecker, 0, 0, 5)
+        val linearSearch = LinearSearch(benchmarkExecutor)
+        val lowerBoundRestriction = LowerBoundRestriction(results)
+        val strategy =
+            CompositeStrategy(benchmarkExecutor, linearSearch, setOf(lowerBoundRestriction))
+
+        val actual: ArrayList<Resource?> = ArrayList()
+        val expected: ArrayList<Resource?> = ArrayList(listOf(0, 2, 2, 3, 4, 6).map { x -> Resource(x, emptyList()) })
+        expected.add(null)
+
+        for (load in mockLoads) {
+            actual.add(strategy.findSuitableResource(load, mockResources))
+        }
+
+        assertEquals(expected, actual)
+    }
+
+    @Test
+    fun testEnd2EndBinarySearch() {
+        val mockResults = arrayOf(
+            arrayOf(true, true, true, true, true, true, true),
+            arrayOf(false, false, true, true, true, true, true),
+            arrayOf(false, false, true, true, true, true, true),
+            arrayOf(false, false, false, true, true, true, true),
+            arrayOf(false, false, false, false, true, true, true),
+            arrayOf(false, false, false, false, false, false, true),
+            arrayOf(false, false, false, false, false, false, false)
+        )
+        val mockLoads: List<LoadDimension> = (0..6).map { number -> LoadDimension(number, emptyList()) }
+        val mockResources: List<Resource> = (0..6).map { number -> Resource(number, emptyList()) }
+        val results = Results()
+        val benchmark = TestBenchmark()
+        val sloChecker: BenchmarkExecution.Slo = BenchmarkExecution.Slo()
+        val benchmarkExecutorImpl =
+            TestBenchmarkExecutorImpl(mockResults, benchmark, results, sloChecker, 0, 0, 0)
+        val binarySearch = BinarySearch(benchmarkExecutorImpl)
+        val lowerBoundRestriction = LowerBoundRestriction(results)
+        val strategy =
+            CompositeStrategy(benchmarkExecutorImpl, binarySearch, setOf(lowerBoundRestriction))
+
+        val actual: ArrayList<Resource?> = ArrayList()
+        val expected: ArrayList<Resource?> = ArrayList(listOf(0, 2, 2, 3, 4, 6).map { x -> Resource(x, emptyList()) })
+        expected.add(null)
+
+        for (load in mockLoads) {
+            actual.add(strategy.findSuitableResource(load, mockResources))
+        }
+
+        assertEquals(expected, actual)
+    }
+
+    @Test
+    fun testEnd2EndBinarySearch2() {
+        val mockResults = arrayOf(
+            arrayOf(true, true, true, true, true, true, true, true),
+            arrayOf(false, false, true, true, true, true, true, true),
+            arrayOf(false, false, true, true, true, true, true, true),
+            arrayOf(false, false, false, true, true, true, true, true),
+            arrayOf(false, false, false, false, true, true, true, true),
+            arrayOf(false, false, false, false, false, false, true, true),
+            arrayOf(false, false, false, false, false, false, false, true)
+        )
+        val mockLoads: List<LoadDimension> = (0..6).map { number -> LoadDimension(number, emptyList()) }
+        val mockResources: List<Resource> = (0..7).map { number -> Resource(number, emptyList()) }
+        val results = Results()
+        val benchmark = TestBenchmark()
+        val sloChecker: BenchmarkExecution.Slo = BenchmarkExecution.Slo()
+        val benchmarkExecutor = TestBenchmarkExecutorImpl(mockResults, benchmark, results, sloChecker, 0, 0, 0)
+        val binarySearch = BinarySearch(benchmarkExecutor)
+        val lowerBoundRestriction = LowerBoundRestriction(results)
+        val strategy =
+            CompositeStrategy(benchmarkExecutor, binarySearch, setOf(lowerBoundRestriction))
+
+        val actual: ArrayList<Resource?> = ArrayList()
+        val expected: ArrayList<Resource?> =
+            ArrayList(listOf(0, 2, 2, 3, 4, 6, 7).map { x -> Resource(x, emptyList()) })
+
+        for (load in mockLoads) {
+            actual.add(strategy.findSuitableResource(load, mockResources))
+        }
+
+        assertEquals(expected, actual)
+    }
+}
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/ResourceLimitPatcherTest.kt b/theodolite-quarkus/src/test/kotlin/theodolite/ResourceLimitPatcherTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..e88192dd7fe4393494a4fb76bd74d1123bd75f1d
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/ResourceLimitPatcherTest.kt
@@ -0,0 +1,89 @@
+package theodolite
+
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.client.DefaultKubernetesClient
+import io.quarkus.test.junit.QuarkusTest
+import io.smallrye.common.constraint.Assert.assertTrue
+import org.junit.jupiter.api.Test
+import theodolite.k8s.K8sResourceLoader
+import theodolite.patcher.PatcherFactory
+import theodolite.util.PatcherDefinition
+
+/**
+ * Resource patcher test
+ *
+ * This class tests four scenarios for the ResourceLimitPatcher and the ResourceRequestPatcher.
+ * The different test cases specify four possible situations:
+ * Case 1:  In the given YAML declaration memory and cpu are defined
+ * Case 2:  In the given YAML declaration only cpu is defined
+ * Case 3:  In the given YAML declaration only memory is defined
+ * Case 4:  In the given YAML declaration neither `Resource Request` nor `Request Limit` is defined
+ */
+@QuarkusTest
+class ResourceLimitPatcherTest {
+    val testPath = "./src/test/resources/"
+    val loader = K8sResourceLoader(DefaultKubernetesClient().inNamespace(""))
+    val patcherFactory = PatcherFactory()
+
+    fun applyTest(fileName: String) {
+        val cpuValue = "50m"
+        val memValue = "3Gi"
+        val k8sResource = loader.loadK8sResource("Deployment", testPath + fileName) as Deployment
+
+        val defCPU = PatcherDefinition()
+        defCPU.resource = "cpu-memory-deployment.yaml"
+        defCPU.type = "ResourceLimitPatcher"
+        defCPU.properties = mutableMapOf(
+            "limitedResource" to "cpu",
+            "container" to "application"
+        )
+
+        val defMEM = PatcherDefinition()
+        defMEM.resource = "cpu-memory-deployment.yaml"
+        defMEM.type = "ResourceLimitPatcher"
+        defMEM.properties = mutableMapOf(
+            "limitedResource" to "memory",
+            "container" to "uc-application"
+        )
+
+        patcherFactory.createPatcher(
+            patcherDefinition = defCPU,
+            k8sResources = listOf(Pair("cpu-memory-deployment.yaml", k8sResource))
+        ).patch(value = cpuValue)
+
+        patcherFactory.createPatcher(
+            patcherDefinition = defMEM,
+            k8sResources = listOf(Pair("cpu-memory-deployment.yaml", k8sResource))
+        ).patch(value = memValue)
+
+        k8sResource.spec.template.spec.containers.filter { it.name == defCPU.properties["container"]!! }
+            .forEach {
+                assertTrue(it.resources.limits["cpu"].toString() == cpuValue)
+                assertTrue(it.resources.limits["memory"].toString() == memValue)
+            }
+    }
+
+    @Test
+    fun testWithExistingCpuAndMemoryDeclarations() {
+        // Case 1: In the given YAML declaration memory and cpu are defined
+        applyTest("cpu-memory-deployment.yaml")
+    }
+
+    @Test
+    fun testOnlyWithExistingCpuDeclarations() {
+        // Case 2:  In the given YAML declaration only cpu is defined
+        applyTest("cpu-deployment.yaml")
+    }
+
+    @Test
+    fun testOnlyWithExistingMemoryDeclarations() {
+        //  Case 3:  In the given YAML declaration only memory is defined
+        applyTest("memory-deployment.yaml")
+    }
+
+    @Test
+    fun testWithoutResourceDeclarations() {
+        // Case 4: In the given YAML declaration neither `Resource Request` nor `Request Limit` is defined
+        applyTest("no-resources-deployment.yaml")
+    }
+}
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/ResourceRequestPatcherTest.kt b/theodolite-quarkus/src/test/kotlin/theodolite/ResourceRequestPatcherTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..2af6c632567bf47e150a74808ab009bd0bc0598a
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/ResourceRequestPatcherTest.kt
@@ -0,0 +1,88 @@
+package theodolite
+
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.client.DefaultKubernetesClient
+import io.quarkus.test.junit.QuarkusTest
+import io.smallrye.common.constraint.Assert.assertTrue
+import org.junit.jupiter.api.Test
+import theodolite.k8s.K8sResourceLoader
+import theodolite.patcher.PatcherFactory
+import theodolite.util.PatcherDefinition
+
+/**
+ * Resource patcher test
+ *
+ * This class tests four scenarios for the ResourceLimitPatcher and the ResourceRequestPatcher.
+ * The different test cases specify four possible situations:
+ * Case 1:  In the given YAML declaration memory and cpu are defined
+ * Case 2:  In the given YAML declaration only cpu is defined
+ * Case 3:  In the given YAML declaration only memory is defined
+ * Case 4:  In the given YAML declaration neither `Resource Request` nor `Request Limit` is defined
+ */
+@QuarkusTest
+class ResourceRequestPatcherTest {
+    val testPath = "./src/test/resources/"
+    val loader = K8sResourceLoader(DefaultKubernetesClient().inNamespace(""))
+    val patcherFactory = PatcherFactory()
+
+    fun applyTest(fileName: String) {
+        val cpuValue = "50m"
+        val memValue = "3Gi"
+        val k8sResource = loader.loadK8sResource("Deployment", testPath + fileName) as Deployment
+
+        val defCPU = PatcherDefinition()
+        defCPU.resource = "cpu-memory-deployment.yaml"
+        defCPU.type = "ResourceRequestPatcher"
+        defCPU.properties = mutableMapOf(
+            "requestedResource" to "cpu",
+            "container" to "application"
+        )
+
+        val defMEM = PatcherDefinition()
+        defMEM.resource = "cpu-memory-deployment.yaml"
+        defMEM.type = "ResourceRequestPatcher"
+        defMEM.properties = mutableMapOf(
+            "requestedResource" to "memory",
+            "container" to "application"
+        )
+
+        patcherFactory.createPatcher(
+            patcherDefinition = defCPU,
+            k8sResources = listOf(Pair("cpu-memory-deployment.yaml", k8sResource))
+        ).patch(value = cpuValue)
+        patcherFactory.createPatcher(
+            patcherDefinition = defMEM,
+            k8sResources = listOf(Pair("cpu-memory-deployment.yaml", k8sResource))
+        ).patch(value = memValue)
+
+        k8sResource.spec.template.spec.containers.filter { it.name == defCPU.properties["container"]!! }
+            .forEach {
+                assertTrue(it.resources.requests["cpu"].toString() == cpuValue)
+                assertTrue(it.resources.requests["memory"].toString() == memValue)
+            }
+    }
+
+    @Test
+    fun testWithExistingCpuAndMemoryDeclarations() {
+        // Case 1: In the given YAML declaration memory and cpu are defined
+        applyTest("cpu-memory-deployment.yaml")
+    }
+
+    @Test
+    fun testOnlyWithExistingCpuDeclarations() {
+        // Case 2:  In the given YAML declaration only cpu is defined
+        applyTest("cpu-deployment.yaml")
+    }
+
+    @Test
+    fun testOnlyWithExistingMemoryDeclarations() {
+        //  Case 3:  In the given YAML declaration only memory is defined
+        applyTest("memory-deployment.yaml")
+    }
+
+    @Test
+    fun testWithoutResourceDeclarations() {
+        // Case 4: In the given YAML declaration neither `Resource Request` nor `Request Limit` is defined
+        applyTest("no-resources-deployment.yaml")
+    }
+}
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/TestBenchmark.kt b/theodolite-quarkus/src/test/kotlin/theodolite/TestBenchmark.kt
new file mode 100644
index 0000000000000000000000000000000000000000..913a27a1b1c1412aa0a58baf9e11fafb1c7f4bd2
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/TestBenchmark.kt
@@ -0,0 +1,20 @@
+package theodolite
+
+import theodolite.benchmark.Benchmark
+import theodolite.benchmark.BenchmarkDeployment
+import theodolite.util.ConfigurationOverride
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+
+class TestBenchmark : Benchmark {
+
+    override fun buildDeployment(
+        load: LoadDimension,
+        res: Resource,
+        configurationOverrides: List<ConfigurationOverride?>,
+        loadGenerationDelay: Long,
+        afterTeardownDelay: Long
+    ): BenchmarkDeployment {
+        return TestBenchmarkDeployment()
+    }
+}
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/TestBenchmarkDeployment.kt b/theodolite-quarkus/src/test/kotlin/theodolite/TestBenchmarkDeployment.kt
new file mode 100644
index 0000000000000000000000000000000000000000..68b08c294128368ee1b65549aa85c877bd4bf313
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/TestBenchmarkDeployment.kt
@@ -0,0 +1,9 @@
+package theodolite
+
+import theodolite.benchmark.BenchmarkDeployment
+
+class TestBenchmarkDeployment : BenchmarkDeployment {
+    override fun setup() {}
+
+    override fun teardown() {}
+}
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/TestBenchmarkExecutorImpl.kt b/theodolite-quarkus/src/test/kotlin/theodolite/TestBenchmarkExecutorImpl.kt
new file mode 100644
index 0000000000000000000000000000000000000000..cbd2d5926d61b0bfd4de6fab0c14422ddf88f190
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/TestBenchmarkExecutorImpl.kt
@@ -0,0 +1,37 @@
+package theodolite
+
+import theodolite.benchmark.Benchmark
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.execution.BenchmarkExecutor
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import theodolite.util.Results
+import java.time.Duration
+
+class TestBenchmarkExecutorImpl(
+    private val mockResults: Array<Array<Boolean>>,
+    benchmark: Benchmark,
+    results: Results,
+    slo: BenchmarkExecution.Slo,
+    executionId: Int,
+    loadGenerationDelay: Long,
+    afterTeardownDelay: Long
+) :
+    BenchmarkExecutor(
+        benchmark,
+        results,
+        executionDuration = Duration.ofSeconds(1),
+        configurationOverrides = emptyList(),
+        slo = slo,
+        repetitions = 1,
+        executionId = executionId,
+        loadGenerationDelay = loadGenerationDelay,
+        afterTeardownDelay = afterTeardownDelay
+    ) {
+
+    override fun runExperiment(load: LoadDimension, res: Resource): Boolean {
+        val result = this.mockResults[load.get()][res.get()]
+        this.results.setResult(Pair(load, res), result)
+        return result
+    }
+}
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/BenchmarkCRDummy.kt b/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/BenchmarkCRDummy.kt
new file mode 100644
index 0000000000000000000000000000000000000000..f3fd06a16e38439a2a694b415edc4d8b332ffd4d
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/BenchmarkCRDummy.kt
@@ -0,0 +1,34 @@
+package theodolite.execution.operator
+
+import theodolite.benchmark.KubernetesBenchmark
+import theodolite.model.crd.BenchmarkCRD
+import theodolite.util.KafkaConfig
+
+class BenchmarkCRDummy(name: String) {
+
+    private val benchmark = KubernetesBenchmark()
+    private val benchmarkCR = BenchmarkCRD(benchmark)
+
+    fun getCR(): BenchmarkCRD {
+        return benchmarkCR
+    }
+
+    init {
+        val kafkaConfig = KafkaConfig()
+
+        kafkaConfig.bootstrapServer = ""
+        kafkaConfig.topics = emptyList()
+
+        benchmarkCR.spec = benchmark
+        benchmarkCR.metadata.name = name
+        benchmarkCR.kind = "Benchmark"
+        benchmarkCR.apiVersion = "v1"
+
+        benchmark.appResource = emptyList()
+        benchmark.loadGenResource = emptyList()
+        benchmark.resourceTypes = emptyList()
+        benchmark.loadTypes = emptyList()
+        benchmark.kafkaConfig = kafkaConfig
+        benchmark.name = benchmarkCR.metadata.name
+    }
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/ControllerDummy.kt b/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/ControllerDummy.kt
new file mode 100644
index 0000000000000000000000000000000000000000..eec399218a2920278f98dc7366f4f487262c3178
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/ControllerDummy.kt
@@ -0,0 +1,67 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.client.NamespacedKubernetesClient
+import io.fabric8.kubernetes.client.dsl.MixedOperation
+import io.fabric8.kubernetes.client.dsl.Resource
+import io.fabric8.kubernetes.internal.KubernetesDeserializer
+import theodolite.model.crd.BenchmarkCRD
+import theodolite.model.crd.BenchmarkExecutionList
+import theodolite.model.crd.ExecutionCRD
+import theodolite.model.crd.KubernetesBenchmarkList
+
+private const val SCOPE = "Namespaced"
+private const val EXECUTION_SINGULAR = "execution"
+private const val EXECUTION_PLURAL = "executions"
+private const val BENCHMARK_SINGULAR = "benchmark"
+private const val BENCHMARK_PLURAL = "benchmarks"
+private const val API_VERSION = "v1"
+private const val GROUP = "theodolite.com"
+
+class ControllerDummy(client: NamespacedKubernetesClient) {
+
+    private var controller: TheodoliteController
+    val executionStateHandler = ExecutionStateHandler(
+        client = client
+    )
+
+    fun getController(): TheodoliteController {
+        return this.controller
+    }
+
+    init {
+        KubernetesDeserializer.registerCustomKind(
+            "$GROUP/$API_VERSION",
+            EXECUTION_SINGULAR,
+            ExecutionCRD::class.java
+        )
+
+        KubernetesDeserializer.registerCustomKind(
+            "$GROUP/$API_VERSION",
+            BENCHMARK_SINGULAR,
+            BenchmarkCRD::class.java
+        )
+
+        val executionCRDClient: MixedOperation<
+                ExecutionCRD,
+                BenchmarkExecutionList,
+                Resource<ExecutionCRD>> = client.customResources(
+            ExecutionCRD::class.java,
+            BenchmarkExecutionList::class.java
+        )
+
+        val benchmarkCRDClient = client.customResources(
+            BenchmarkCRD::class.java,
+            KubernetesBenchmarkList::class.java
+        )
+
+        val appResource = System.getenv("THEODOLITE_APP_RESOURCES") ?: "./config"
+        this.controller =
+            TheodoliteController(
+                namespace = client.namespace,
+                path = appResource,
+                benchmarkCRDClient = benchmarkCRDClient,
+                executionCRDClient = executionCRDClient,
+                executionStateHandler = executionStateHandler
+            )
+    }
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/ControllerTest.kt b/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/ControllerTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..9174a4cc78933d4c028b2c2a73e1adb63047868f
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/ControllerTest.kt
@@ -0,0 +1,138 @@
+package theodolite.execution.operator
+
+import com.google.gson.Gson
+import com.google.gson.GsonBuilder
+import io.fabric8.kubernetes.client.CustomResourceList
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer
+import io.quarkus.test.junit.QuarkusTest
+import org.junit.jupiter.api.AfterEach
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.Test
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.benchmark.KubernetesBenchmark
+import theodolite.model.crd.BenchmarkCRD
+import theodolite.model.crd.ExecutionCRD
+
+@QuarkusTest
+class ControllerTest {
+    private final val server = KubernetesServer(false, false)
+    lateinit var controller: TheodoliteController
+    private val gson: Gson = GsonBuilder().enableComplexMapKeySerialization().create()
+
+    private var benchmark = KubernetesBenchmark()
+    private var execution = BenchmarkExecution()
+
+    private val benchmarkResourceList = CustomResourceList<BenchmarkCRD>()
+    private val executionResourceList = CustomResourceList<ExecutionCRD>()
+
+
+    @BeforeEach
+    fun setUp() {
+        server.before()
+        this.controller = ControllerDummy(server.client).getController()
+
+        // benchmark
+        val benchmark1 = BenchmarkCRDummy(name = "Test-Benchmark")
+        val benchmark2 = BenchmarkCRDummy(name = "Test-Benchmark-123")
+        benchmarkResourceList.items = listOf(benchmark1.getCR(), benchmark2.getCR())
+
+        // execution
+        val execution1 = ExecutionCRDummy(name = "matching-execution", benchmark = "Test-Benchmark")
+        val execution2 = ExecutionCRDummy(name = "non-matching-execution", benchmark = "Test-Benchmark-456")
+        val execution3 = ExecutionCRDummy(name = "second-matching-execution", benchmark = "Test-Benchmark")
+        executionResourceList.items = listOf(execution1.getCR(), execution2.getCR(), execution3.getCR())
+
+        this.benchmark = benchmark1.getCR().spec
+        this.execution = execution1.getCR().spec
+
+        server
+            .expect()
+            .get()
+            .withPath("/apis/theodolite.com/v1/namespaces/test/benchmarks")
+            .andReturn(200, benchmarkResourceList)
+            .always()
+
+        server
+            .expect()
+            .get()
+            .withPath("/apis/theodolite.com/v1/namespaces/test/executions")
+            .andReturn(200, executionResourceList)
+            .always()
+    }
+
+    @AfterEach
+    fun tearDown() {
+        server.after()
+    }
+
+    @Test
+    fun getBenchmarksTest() {
+        val method = controller
+            .javaClass
+            .getDeclaredMethod("getBenchmarks")
+        method.isAccessible = true
+
+        val result = method.invoke(controller) as List<KubernetesBenchmark>
+
+        assertEquals(2, result.size)
+        assertEquals(
+            gson.toJson(benchmark),
+            gson.toJson(result.firstOrNull())
+        )
+    }
+
+    @Test
+    fun getNextExecution() {
+        val method = controller
+            .javaClass
+            .getDeclaredMethod("getNextExecution")
+        method.isAccessible = true
+
+        val result = method.invoke(controller) as BenchmarkExecution?
+
+        assertEquals(
+            gson.toJson(this.execution),
+            gson.toJson(result)
+        )
+    }
+
+    @Test
+    fun setAdditionalLabelsTest() {
+        val method = controller
+            .javaClass
+            .getDeclaredMethod(
+                "setAdditionalLabels",
+                String::class.java,
+                String::class.java,
+                List::class.java,
+                BenchmarkExecution::class.java
+            )
+        method.isAccessible = true
+
+        this.benchmark.appResource = listOf("test-resource.yaml")
+
+        method.invoke(
+            controller,
+            "test-value",
+            "test-name",
+            this.benchmark.appResource,
+            this.execution
+        ) as BenchmarkExecution?
+
+        assertEquals(
+            "test-name",
+            this.execution
+                .configOverrides.firstOrNull()
+                ?.patcher
+                ?.properties
+                ?.get("variableName")
+        )
+        assertEquals(
+            "test-value",
+            this.execution
+                .configOverrides.firstOrNull()
+                ?.value
+        )
+    }
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/ExecutionCRDummy.kt b/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/ExecutionCRDummy.kt
new file mode 100644
index 0000000000000000000000000000000000000000..2589319299cfa29f95216033ddc806d002f38663
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/ExecutionCRDummy.kt
@@ -0,0 +1,52 @@
+package theodolite.execution.operator
+
+import theodolite.benchmark.BenchmarkExecution
+import theodolite.model.crd.ExecutionCRD
+import theodolite.model.crd.ExecutionStatus
+import theodolite.model.crd.States
+
+class ExecutionCRDummy(name: String, benchmark: String) {
+
+    private val execution = BenchmarkExecution()
+    private val executionState = ExecutionStatus()
+    private val executionCR = ExecutionCRD(execution, executionState)
+
+    fun getCR(): ExecutionCRD {
+        return this.executionCR
+    }
+
+    init {
+        // configure metadata
+        executionCR.spec = execution
+        executionCR.metadata.name = name
+        executionCR.kind = "Execution"
+        executionCR.apiVersion = "v1"
+
+        // configure execution
+        val loadType = BenchmarkExecution.LoadDefinition()
+        loadType.loadType = ""
+        loadType.loadValues = emptyList()
+
+        val resourceDef = BenchmarkExecution.ResourceDefinition()
+        resourceDef.resourceType = ""
+        resourceDef.resourceValues = emptyList()
+
+        val exec = BenchmarkExecution.Execution()
+        exec.afterTeardownDelay = 0
+        exec.duration = 0
+        exec.loadGenerationDelay = 0
+        exec.repetitions = 1
+        exec.restrictions = emptyList()
+        exec.strategy = ""
+
+        execution.benchmark = benchmark
+        execution.load = loadType
+        execution.resources = resourceDef
+        execution.slos = emptyList()
+        execution.execution = exec
+        execution.configOverrides = mutableListOf()
+        execution.name = executionCR.metadata.name
+
+        executionState.executionState = States.PENDING.value
+    }
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/ExecutionEventHandlerTest.kt b/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/ExecutionEventHandlerTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..fd192cd3b53db6447a75710d1813e116dc555aeb
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/ExecutionEventHandlerTest.kt
@@ -0,0 +1,220 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.api.model.KubernetesResource
+import io.fabric8.kubernetes.client.informers.SharedInformerFactory
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer
+import io.quarkus.test.junit.QuarkusTest
+import org.junit.jupiter.api.AfterEach
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.DisplayName
+import org.junit.jupiter.api.Test
+import theodolite.k8s.K8sManager
+import theodolite.k8s.K8sResourceLoader
+import theodolite.model.crd.BenchmarkExecutionList
+import theodolite.model.crd.ExecutionCRD
+import theodolite.model.crd.States
+import java.lang.Thread.sleep
+
+
+private const val RESYNC_PERIOD = 1000 * 1000.toLong()
+
+
+@QuarkusTest
+class ExecutionEventHandlerTest {
+    private final val server = KubernetesServer(false, true)
+    private val testResourcePath = "./src/test/resources/k8s-resource-files/"
+    private final val executionName = "example-execution"
+    lateinit var factory: SharedInformerFactory
+    lateinit var executionVersion1: KubernetesResource
+    lateinit var executionVersion2: KubernetesResource
+    lateinit var stateHandler: ExecutionStateHandler
+    lateinit var manager: K8sManager
+
+    @BeforeEach
+    fun setUp() {
+        server.before()
+        val controllerDummy = ControllerDummy(server.client)
+
+        this.factory = server.client.informers()
+        val informerExecution = factory
+            .sharedIndexInformerForCustomResource(
+                ExecutionCRD::class.java,
+                BenchmarkExecutionList::class.java,
+                RESYNC_PERIOD
+            )
+
+        informerExecution.addEventHandler(
+            ExecutionHandler(
+                controller = controllerDummy.getController(),
+                stateHandler = controllerDummy.executionStateHandler
+            )
+        )
+
+        this.executionVersion1 = K8sResourceLoader(server.client)
+            .loadK8sResource("Execution", testResourcePath + "test-execution.yaml")
+
+        this.executionVersion2 = K8sResourceLoader(server.client)
+            .loadK8sResource("Execution", testResourcePath + "test-execution-update.yaml")
+
+        this.stateHandler = ControllerDummy(server.client).executionStateHandler
+
+        this.manager = K8sManager((server.client))
+    }
+
+    @AfterEach
+    fun tearDown() {
+        server.after()
+        factory.stopAllRegisteredInformers()
+    }
+
+    @Test
+    @DisplayName("Test onAdd method for executions without execution state")
+    fun testWithoutState() {
+        manager.deploy(executionVersion1)
+        factory.startAllRegisteredInformers()
+        sleep(500)
+        assertEquals(
+            States.PENDING,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+    }
+
+    @Test
+    @DisplayName("Test onAdd method for executions with execution state `RUNNING`")
+    fun testWithStateIsRunning() {
+        manager.deploy(executionVersion1)
+        stateHandler
+            .setExecutionState(
+                resourceName = executionName,
+                status = States.RUNNING
+            )
+        factory.startAllRegisteredInformers()
+        sleep(500)
+        assertEquals(
+            States.RESTART,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+    }
+
+    @Test
+    @DisplayName("Test onUpdate method for execution with execution state `PENDING`")
+    fun testOnUpdatePending() {
+        manager.deploy(executionVersion1)
+
+        factory.startAllRegisteredInformers()
+        sleep(500)
+
+        assertEquals(
+            States.PENDING,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+
+        manager.deploy(executionVersion2)
+        assertEquals(
+            States.PENDING,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+    }
+
+    @Test
+    @DisplayName("Test onUpdate method for execution with execution state `FINISHED`")
+    fun testOnUpdateFinished() {
+        manager.deploy(executionVersion1)
+        factory.startAllRegisteredInformers()
+        sleep(500)
+
+        stateHandler.setExecutionState(
+            resourceName = executionName,
+            status = States.FINISHED
+        )
+
+        manager.deploy(executionVersion2)
+        sleep(500)
+
+        assertEquals(
+            States.PENDING,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+    }
+
+    @Test
+    @DisplayName("Test onUpdate method for execution with execution state `FAILURE`")
+    fun testOnUpdateFailure() {
+        manager.deploy(executionVersion1)
+        factory.startAllRegisteredInformers()
+        sleep(500)
+
+        stateHandler.setExecutionState(
+            resourceName = executionName,
+            status = States.FAILURE
+        )
+
+        manager.deploy(executionVersion2)
+        sleep(500)
+
+        assertEquals(
+            States.PENDING,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+    }
+
+
+    @Test
+    @DisplayName("Test onUpdate method for execution with execution state `RUNNING`")
+    fun testOnUpdateRunning() {
+        manager.deploy(executionVersion1)
+        factory.startAllRegisteredInformers()
+        sleep(500)
+
+        stateHandler.setExecutionState(
+            resourceName = executionName,
+            status = States.RUNNING
+        )
+
+        manager.deploy(executionVersion2)
+        sleep(500)
+
+        assertEquals(
+            States.RESTART,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+    }
+
+    @Test
+    @DisplayName("Test onUpdate method for execution with execution state `RESTART`")
+    fun testOnUpdateRestart() {
+        manager.deploy(executionVersion1)
+        factory.startAllRegisteredInformers()
+        sleep(500)
+
+        stateHandler.setExecutionState(
+            resourceName = executionName,
+            status = States.RESTART
+        )
+
+        manager.deploy(executionVersion2)
+        sleep(500)
+
+        assertEquals(
+            States.RESTART,
+            stateHandler.getExecutionState(
+                resourceName = executionName
+            )
+        )
+    }
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/StateHandlerTest.kt b/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/StateHandlerTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..7537be82c4caf221bdeea7d112df8b6af153c876
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/execution/operator/StateHandlerTest.kt
@@ -0,0 +1,72 @@
+package theodolite.execution.operator
+
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer
+import org.junit.jupiter.api.AfterEach
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Assertions.assertTrue
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.DisplayName
+import org.junit.jupiter.api.Test
+import theodolite.k8s.K8sContextFactory
+import theodolite.k8s.K8sManager
+import theodolite.k8s.K8sResourceLoader
+import theodolite.model.crd.States
+import java.time.Duration
+
+class StateHandlerTest {
+    private val testResourcePath = "./src/test/resources/k8s-resource-files/"
+    private val server = KubernetesServer(false, true)
+    private val context = K8sContextFactory().create(
+        api = "v1",
+        scope = "Namespaced",
+        group = "theodolite.com",
+        plural = "executions"
+    )
+
+
+    @BeforeEach
+    fun setUp() {
+        server.before()
+        val executionResource = K8sResourceLoader(server.client)
+            .loadK8sResource("Execution", testResourcePath + "test-execution.yaml")
+
+        K8sManager(server.client).deploy(executionResource)
+    }
+
+    @AfterEach
+    fun tearDown() {
+        server.after()
+    }
+
+    @Test
+    @DisplayName("Test empty execution state")
+    fun executionWithoutExecutionStatusTest() {
+        val handler = ExecutionStateHandler(client = server.client)
+        assertEquals(States.NO_STATE, handler.getExecutionState("example-execution"))
+    }
+
+    @Test
+    @DisplayName("Test empty duration state")
+    fun executionWithoutDurationStatusTest() {
+        val handler = ExecutionStateHandler(client = server.client)
+        assertEquals("-", handler.getDurationState("example-execution"))
+    }
+
+    @Test
+    @DisplayName("Test set and get of the execution state")
+    fun executionStatusTest() {
+        val handler = ExecutionStateHandler(client = server.client)
+
+        assertTrue(handler.setExecutionState("example-execution", States.INTERRUPTED))
+        assertEquals(States.INTERRUPTED, handler.getExecutionState("example-execution"))
+    }
+
+    @Test
+    @DisplayName("Test set and get of the duration state")
+    fun durationStatusTest() {
+        val handler = ExecutionStateHandler(client = server.client)
+
+        assertTrue(handler.setDurationState("example-execution", Duration.ofMillis(100)))
+        assertEquals("0s", handler.getDurationState("example-execution"))
+    }
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/k8s/K8sManagerTest.kt b/theodolite-quarkus/src/test/kotlin/theodolite/k8s/K8sManagerTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..dc2bf016994d79b1021bebdc751102e291d60682
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/k8s/K8sManagerTest.kt
@@ -0,0 +1,155 @@
+package theodolite.k8s
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties
+import io.fabric8.kubernetes.api.model.*
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+import io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer
+import io.quarkus.test.junit.QuarkusTest
+import mu.KotlinLogging
+import org.json.JSONObject
+import org.junit.jupiter.api.AfterEach
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.DisplayName
+import org.junit.jupiter.api.Test
+
+
+private val logger = KotlinLogging.logger {}
+
+@QuarkusTest
+@JsonIgnoreProperties(ignoreUnknown = true)
+class K8sManagerTest {
+    @JsonIgnoreProperties(ignoreUnknown = true)
+    private final val server = KubernetesServer(false, true)
+    private final val testResourcePath = "./src/test/resources/k8s-resource-files/"
+
+    private final val resourceName = "test-resource"
+    private final val metadata: ObjectMeta = ObjectMetaBuilder().withName(resourceName).build()
+
+
+    val defaultDeployment: Deployment = DeploymentBuilder()
+        .withMetadata(metadata)
+        .withNewSpec()
+        .editOrNewSelector()
+        .withMatchLabels<String, String>(mapOf("app" to "test"))
+        .endSelector()
+        .endSpec()
+        .build()
+
+    val defaultStatefulSet: StatefulSet = StatefulSetBuilder()
+        .withMetadata(metadata)
+        .withNewSpec()
+        .editOrNewSelector()
+        .withMatchLabels<String, String>(mapOf("app" to "test"))
+        .endSelector()
+        .endSpec()
+        .build()
+
+    val defaultService: Service = ServiceBuilder()
+        .withMetadata(metadata)
+        .build()
+
+    val defaultConfigMap: ConfigMap = ConfigMapBuilder()
+        .withMetadata(metadata)
+        .build()
+
+    @BeforeEach
+    fun setUp() {
+        server.before()
+    }
+
+    @AfterEach
+    fun tearDown() {
+        server.after()
+    }
+
+    @Test
+    @DisplayName("Test handling of Deployments")
+    fun handleDeploymentTest() {
+        val manager = K8sManager(server.client)
+
+        manager.deploy(defaultDeployment)
+        assertEquals(1, server.client.apps().deployments().list().items.size)
+        assertEquals(resourceName, server.client.apps().deployments().list().items.first().metadata.name)
+
+        manager.remove(defaultDeployment)
+        assertEquals(0, server.client.apps().deployments().list().items.size)
+    }
+
+    @Test
+    @DisplayName("Test handling of StatefulSets")
+    fun handleStatefulSetTest() {
+        val manager = K8sManager(server.client)
+
+        manager.deploy(defaultStatefulSet)
+        assertEquals(1, server.client.apps().statefulSets().list().items.size)
+        assertEquals(resourceName, server.client.apps().statefulSets().list().items.first().metadata.name)
+
+        manager.remove(defaultStatefulSet)
+        assertEquals(0, server.client.apps().statefulSets().list().items.size)
+    }
+
+    @Test
+    @DisplayName("Test handling of Services")
+    fun handleServiceTest() {
+        val manager = K8sManager(server.client)
+
+        manager.deploy(defaultService)
+        assertEquals(1, server.client.services().list().items.size)
+        assertEquals(resourceName, server.client.services().list().items.first().metadata.name)
+
+        manager.remove(defaultService)
+        assertEquals(0, server.client.services().list().items.size)
+    }
+
+    @Test
+    @DisplayName("Test handling of ConfigMaps")
+    fun handleConfigMapTest() {
+        val manager = K8sManager(server.client)
+
+        manager.deploy(defaultConfigMap)
+        assertEquals(1, server.client.configMaps().list().items.size)
+        assertEquals(resourceName, server.client.configMaps().list().items.first().metadata.name)
+
+        manager.remove(defaultConfigMap)
+        assertEquals(0, server.client.configMaps().list().items.size)
+    }
+
+    @Test
+    @DisplayName("Test handling of custom resources")
+    fun handleCustomResourcesTest() {
+        val manager = K8sManager(server.client)
+        val servicemonitor = K8sResourceLoader(server.client)
+            .loadK8sResource("ServiceMonitor", testResourcePath + "test-service-monitor.yaml")
+
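+        // CRD coordinates (group, version, scope, plural) for fabric8's raw custom-resource API.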
+        val serviceMonitorContext = K8sContextFactory().create(
+            api = "v1",
+            scope = "Namespaced",
+            group = "monitoring.coreos.com",
+            plural = "servicemonitors"
+        )
+        manager.deploy(servicemonitor)
+
+        var serviceMonitors = JSONObject(server.client.customResource(serviceMonitorContext).list())
+            .getJSONArray("items")
+
+        assertEquals(1, serviceMonitors.length())
+        assertEquals(
+            "test-service-monitor",
+            serviceMonitors.getJSONObject(0).getJSONObject("metadata").getString("name")
+        )
+
+        manager.remove(servicemonitor)
+
+        serviceMonitors = JSONObject(server.client.customResource(serviceMonitorContext).list())
+            .getJSONArray("items")
+
+        assertEquals(0, serviceMonitors.length())
+    }
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/k8s/K8sResourceLoaderTest.kt b/theodolite-quarkus/src/test/kotlin/theodolite/k8s/K8sResourceLoaderTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..7c2aa50007274ff9b4d49f1c0cc05ae45a37d323
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/k8s/K8sResourceLoaderTest.kt
@@ -0,0 +1,110 @@
+package theodolite.k8s
+
+import io.fabric8.kubernetes.api.model.ConfigMap
+import io.fabric8.kubernetes.api.model.Service
+import io.fabric8.kubernetes.api.model.apps.Deployment
+import io.fabric8.kubernetes.api.model.apps.StatefulSet
+import io.fabric8.kubernetes.client.server.mock.KubernetesServer
+import io.quarkus.test.junit.QuarkusTest
+import org.junit.jupiter.api.AfterEach
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Assertions.assertTrue
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.DisplayName
+import org.junit.jupiter.api.Test
+
+@QuarkusTest
+class K8sResourceLoaderTest {
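+    // Each test loads a YAML fixture from testResourcePath and checks that the loader
+    // returns the expected fabric8 model type (or a CustomResourceWrapper for CRDs).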
+    private final val server = KubernetesServer(false, true)
+    private final val testResourcePath = "./src/test/resources/k8s-resource-files/"
+
+    @BeforeEach
+    fun setUp() {
+        server.before()
+    }
+
+    @AfterEach
+    fun tearDown() {
+        server.after()
+    }
+
+    @Test
+    @DisplayName("Test loading of Deployments")
+    fun loadDeploymentTest() {
+        val loader = K8sResourceLoader(server.client)
+        val resource = loader.loadK8sResource("Deployment", testResourcePath + "test-deployment.yaml")
+
+        assertTrue(resource is Deployment)
+        assertTrue(resource.toString().contains("name=test-deployment"))
+    }
+
+    @Test
+    @DisplayName("Test loading of StatefulSet")
+    fun loadStatefulSetTest() {
+        val loader = K8sResourceLoader(server.client)
+        val resource = loader.loadK8sResource("StatefulSet", testResourcePath + "test-statefulset.yaml")
+
+        assertTrue(resource is StatefulSet)
+        assertTrue(resource.toString().contains("name=test-statefulset"))
+    }
+
+    @Test
+    @DisplayName("Test loading of Service")
+    fun loadServiceTest() {
+        val loader = K8sResourceLoader(server.client)
+        val resource = loader.loadK8sResource("Service", testResourcePath + "test-service.yaml")
+
+        assertTrue(resource is Service)
+        assertTrue(resource.toString().contains("name=test-service"))
+    }
+
+    @Test
+    @DisplayName("Test loading of ConfigMap")
+    fun loadConfigMapTest() {
+        val loader = K8sResourceLoader(server.client)
+        val resource = loader.loadK8sResource("ConfigMap", testResourcePath + "test-configmap.yaml")
+
+        assertTrue(resource is ConfigMap)
+        assertTrue(resource.toString().contains("name=test-configmap"))
+    }
+
+    @Test
+    @DisplayName("Test loading of ServiceMonitors")
+    fun loadServiceMonitorTest() {
+        val loader = K8sResourceLoader(server.client)
+        val resource = loader.loadK8sResource("ServiceMonitor", testResourcePath + "test-service-monitor.yaml")
+
+        assertTrue(resource is CustomResourceWrapper)
+        if (resource is CustomResourceWrapper) {
+            assertEquals("test-service-monitor", resource.getName())
+
+        }
+    }
+
+    @Test
+    @DisplayName("Test loading of Executions")
+    fun loadExecutionTest() {
+        val loader = K8sResourceLoader(server.client)
+        val resource = loader.loadK8sResource("Execution", testResourcePath + "test-execution.yaml")
+
+        assertTrue(resource is CustomResourceWrapper)
+        if (resource is CustomResourceWrapper) {
+            assertEquals("example-execution", resource.getName())
+
+        }
+    }
+
+    @Test
+    @DisplayName("Test loading of Benchmarks")
+    fun loadBenchmarkTest() {
+        val loader = K8sResourceLoader(server.client)
+        val resource = loader.loadK8sResource("Benchmark", testResourcePath + "test-benchmark.yaml")
+
+        assertTrue(resource is CustomResourceWrapper)
+        if (resource is CustomResourceWrapper) {
+            assertEquals("example-benchmark", resource.getName())
+
+        }
+    }
+
+}
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/strategies/restriction/LowerBoundRestrictionTest.kt b/theodolite-quarkus/src/test/kotlin/theodolite/strategies/restriction/LowerBoundRestrictionTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..b368647e314a4d803b444268c8218aefbee00ad4
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/strategies/restriction/LowerBoundRestrictionTest.kt
@@ -0,0 +1,118 @@
+package theodolite.strategies.restriction
+
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Assertions.assertNotNull
+import org.junit.jupiter.api.Disabled
+import org.junit.jupiter.api.Test
+import theodolite.util.LoadDimension
+import theodolite.util.Resource
+import theodolite.util.Results
+
+internal class LowerBoundRestrictionTest {
+
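+    // Reading of the tests below: for a given load, the lower-bound restriction removes all
+    // resource amounts below the minimum that was already sufficient for a lower load.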
+    @Test
+    fun testNoPreviousResults() {
+        val results = Results()
+        val strategy = LowerBoundRestriction(results)
+        val load = buildLoadDimension(10000)
+        val resources = listOf(
+            buildResourcesDimension(1),
+            buildResourcesDimension(2),
+            buildResourcesDimension(3)
+        )
+        val restriction = strategy.apply(load, resources)
+
+        assertEquals(3, restriction.size)
+        assertEquals(resources, restriction)
+    }
+
+    @Test
+    fun testWithSuccessfulPreviousResults() {
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(20000, 1, false)
+        results.setResult(20000, 2, true)
+        val strategy = LowerBoundRestriction(results)
+        val load = buildLoadDimension(30000)
+        val resources = listOf(
+            buildResourcesDimension(1),
+            buildResourcesDimension(2),
+            buildResourcesDimension(3)
+        )
+        val restriction = strategy.apply(load, resources)
+
+        assertEquals(2, restriction.size)
+        assertEquals(resources.subList(1, 3), restriction)
+    }
+
+    @Test
+    @Disabled
+    fun testWithNoSuccessfulPreviousResults() {
+        // The restriction strategy does not currently behave this way; this disabled test documents possibly desired future behavior.
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(20000, 1, false)
+        results.setResult(20000, 2, false)
+        results.setResult(20000, 3, false)
+        val strategy = LowerBoundRestriction(results)
+        val load = buildLoadDimension(30000)
+        val resources = listOf(
+            buildResourcesDimension(1),
+            buildResourcesDimension(2),
+            buildResourcesDimension(3)
+        )
+        val restriction = strategy.apply(load, resources)
+
+        assertEquals(0, restriction.size)
+        assertEquals(emptyList<Resource>(), restriction)
+    }
+
+
+    @Test
+    fun testMinRequiredInstancesWithConflictingResults() {
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(20000, 2, true)
+        results.setResult(10000, 1, false)
+        results.setResult(20000, 2, true)
+
+        val minRequiredInstances = results.getMinRequiredInstances(LoadDimension(20000, emptyList()))
+
+        assertNotNull(minRequiredInstances)
+        assertEquals(2, minRequiredInstances!!.get())
+    }
+
+    @Test
+    @Disabled
+    fun testMinRequiredInstancesWhenNotSuccessful() {
+        // The behavior is not currently implemented this way; this disabled test documents possibly desired future behavior.
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(20000, 2, true)
+        results.setResult(10000, 1, false)
+        results.setResult(20000, 2, false)
+
+        val minRequiredInstances = results.getMinRequiredInstances(LoadDimension(20000, emptyList()))
+
+        assertNotNull(minRequiredInstances)
+        assertEquals(2, minRequiredInstances!!.get())
+    }
+
+    private fun buildLoadDimension(load: Int): LoadDimension {
+        return LoadDimension(load, emptyList())
+    }
+
+    private fun buildResourcesDimension(resources: Int): Resource {
+        return Resource(resources, emptyList())
+    }
+
+    private fun Results.setResult(load: Int, resources: Int, successful: Boolean) {
+        this.setResult(
+            Pair(
+                buildLoadDimension(load),
+                buildResourcesDimension(resources)
+            ),
+            successful
+        )
+    }
+}
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/util/IOHandlerTest.kt b/theodolite-quarkus/src/test/kotlin/theodolite/util/IOHandlerTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..6b8aa1d567fd2c93c1301fe3f953273e0f5d5420
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/util/IOHandlerTest.kt
@@ -0,0 +1,139 @@
+package theodolite.util
+
+import com.google.gson.GsonBuilder
+import io.quarkus.test.junit.QuarkusTest
+import org.hamcrest.CoreMatchers.containsString
+import org.hamcrest.MatcherAssert.assertThat
+import org.junit.Rule
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Assertions.assertTrue
+import org.junit.jupiter.api.Test
+import org.junit.rules.TemporaryFolder
+import org.junitpioneer.jupiter.ClearEnvironmentVariable
+import org.junitpioneer.jupiter.SetEnvironmentVariable
+
+
+const val FOLDER_URL = "Test-Folder"
+
+@QuarkusTest
+internal class IOHandlerTest {
+
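+    // Note: JUnit 4 rules are not applied by the JUnit 5 (Jupiter) engine, so the temporary
+    // folder is created manually via create() at the beginning of each test.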
+    @Rule
+    private var temporaryFolder = TemporaryFolder()
+
+    @Test
+    fun testWriteStringToText() {
+        temporaryFolder.create()
+        val testContent = "Test-File-Content"
+        val folder = temporaryFolder.newFolder(FOLDER_URL)
+
+        IOHandler().writeStringToTextFile(
+            fileURL = "${folder.absolutePath}/test-file.txt",
+            data = testContent
+        )
+
+        assertEquals(
+            testContent,
+            IOHandler().readFileAsString("${folder.absolutePath}/test-file.txt")
+        )
+    }
+
+    @Test
+    fun testWriteToCSVFile() {
+        temporaryFolder.create()
+        val folder = temporaryFolder.newFolder(FOLDER_URL)
+
+        val testContent = listOf(
+            listOf("apples", "red"),
+            listOf("bananas", "yellow"),
+            listOf("avocado", "brown")
+        )
+        val columns = listOf("Fruit", "Color")
+
+        IOHandler().writeToCSVFile(
+            fileURL = "${folder.absolutePath}/test-file",
+            data = testContent,
+            columns = columns
+        )
+
+        var expected = "Fruit,Color\n"
+        testContent.forEach { expected += it[0] + "," + it[1] + "\n" }
+
+        assertEquals(
+            expected.trim(),
+            IOHandler().readFileAsString("${folder.absolutePath}/test-file.csv")
+        )
+    }
+
+    @Test
+    fun testWriteToJSONFile() {
+        temporaryFolder.create()
+        val folder = temporaryFolder.newFolder(FOLDER_URL)
+        val testContent = Resource(0, emptyList())
+
+        IOHandler().writeToJSONFile(
+            fileURL = "${folder.absolutePath}/test-file.json",
+            objectToSave = testContent
+        )
+
+        val expected = GsonBuilder().enableComplexMapKeySerialization().setPrettyPrinting().create().toJson(testContent)
+
+        assertEquals(
+            expected,
+            IOHandler().readFileAsString("${folder.absolutePath}/test-file.json")
+        )
+    }
+
+    // Tests for the function `getResultFolderURL`
+
+    @Test
+    @ClearEnvironmentVariable.ClearEnvironmentVariables(
+        ClearEnvironmentVariable(key = "RESULTS_FOLDER"),
+        ClearEnvironmentVariable(key = "CREATE_RESULTS_FOLDER")
+    )
+    fun testGetResultFolderURL_emptyEnvironmentVars() {
+        assertEquals("", IOHandler().getResultFolderURL())
+    }
+
+
+    @Test
+    @SetEnvironmentVariable.SetEnvironmentVariables(
+        SetEnvironmentVariable(key = "RESULTS_FOLDER", value = "./src/test/resources"),
+        SetEnvironmentVariable(key = "CREATE_RESULTS_FOLDER", value = "false")
+    )
+    fun testGetResultFolderURL_FolderExist() {
+        assertEquals("./src/test/resources/", IOHandler().getResultFolderURL())
+    }
+
+    @Test
+    @SetEnvironmentVariable.SetEnvironmentVariables(
+        SetEnvironmentVariable(key = "RESULTS_FOLDER", value = "$FOLDER_URL-0"),
+        SetEnvironmentVariable(key = "CREATE_RESULTS_FOLDER", value = "false")
+    )
+    fun testGetResultFolderURL_FolderNotExist() {
+        var exceptionWasThrown = false
+        try {
+            IOHandler().getResultFolderURL()
+        } catch (e: Exception) {
+            exceptionWasThrown = true
+            assertThat(e.toString(), containsString("Result folder not found"))
+        }
+        assertTrue(exceptionWasThrown)
+    }
+
+    @Test
+    @SetEnvironmentVariable.SetEnvironmentVariables(
+        SetEnvironmentVariable(key = "RESULTS_FOLDER", value = FOLDER_URL),
+        SetEnvironmentVariable(key = "CREATE_RESULTS_FOLDER", value = "true")
+    )
+    fun testGetResultFolderURL_CreateFolderIfNotExist() {
+        assertEquals("$FOLDER_URL/", IOHandler().getResultFolderURL())
+    }
+
+    @Test
+    @ClearEnvironmentVariable(key = "RESULTS_FOLDER")
+    @SetEnvironmentVariable(key = "CREATE_RESULTS_FOLDER", value = "true")
+    fun testGetResultFolderURL_CreateFolderButNoFolderGiven() {
+        assertEquals("", IOHandler().getResultFolderURL())
+    }
+}
diff --git a/theodolite-quarkus/src/test/kotlin/theodolite/util/ResultsTest.kt b/theodolite-quarkus/src/test/kotlin/theodolite/util/ResultsTest.kt
new file mode 100644
index 0000000000000000000000000000000000000000..9cfc2ae78e7a8846e3f0fa136699509145e5de22
--- /dev/null
+++ b/theodolite-quarkus/src/test/kotlin/theodolite/util/ResultsTest.kt
@@ -0,0 +1,75 @@
+package theodolite.util
+
+import io.quarkus.test.junit.QuarkusTest
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.Assertions.assertNotNull
+import org.junit.jupiter.api.Disabled
+import org.junit.jupiter.api.Test
+
+@QuarkusTest
+internal class ResultsTest {
+
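+    // Results maps (LoadDimension, Resource) pairs to a success flag; the setResult extension
+    // defined further down builds these pairs to keep the test cases concise.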
+    @Test
+    fun testMinRequiredInstancesWhenSuccessful() {
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(10000, 2, true)
+        results.setResult(20000, 1, false)
+        results.setResult(20000, 2, true)
+
+        val minRequiredInstances = results.getMinRequiredInstances(LoadDimension(20000, emptyList()))
+
+        assertNotNull(minRequiredInstances)
+        assertEquals(2, minRequiredInstances!!.get())
+    }
+
+    @Test
+    @Disabled
+    fun testMinRequiredInstancesWhenNotSuccessful() {
+        // The behavior is not currently implemented this way; this disabled test documents possibly desired future behavior.
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(10000, 2, true)
+        results.setResult(20000, 1, false)
+        results.setResult(20000, 2, false)
+
+        val minRequiredInstances = results.getMinRequiredInstances(LoadDimension(20000, emptyList()))
+
+        assertNotNull(minRequiredInstances)
+        assertEquals(2, minRequiredInstances!!.get())
+    }
+
+    private fun Results.setResult(load: Int, resources: Int, successful: Boolean) {
+        this.setResult(
+            Pair(
+                LoadDimension(load, emptyList()),
+                Resource(resources, emptyList())
+            ),
+            successful
+        )
+    }
+
+
+    @Test
+    fun testGetMaxBenchmarkedLoadWhenAllSuccessful() {
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(10000, 2, true)
+
+        val test1 = results.getMaxBenchmarkedLoad(LoadDimension(100000, emptyList()))!!.get()
+
+        assertEquals(10000, test1)
+    }
+
+    @Test
+    fun testGetMaxBenchmarkedLoadWhenLargestNotSuccessful() {
+        val results = Results()
+        results.setResult(10000, 1, true)
+        results.setResult(10000, 2, true)
+        results.setResult(20000, 1, false)
+
+        val test2 = results.getMaxBenchmarkedLoad(LoadDimension(100000, emptyList()))!!.get()
+
+        assertEquals(20000, test2)
+    }
+}
diff --git a/theodolite-quarkus/src/test/resources/cpu-deployment.yaml b/theodolite-quarkus/src/test/resources/cpu-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9845648949babd260192e6c6fa652db976c04288
--- /dev/null
+++ b/theodolite-quarkus/src/test/resources/cpu-deployment.yaml
@@ -0,0 +1,56 @@
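+# Test fixture: Deployment with CPU requests/limits only (companions: cpu-memory-, memory- and no-resources-deployment.yaml).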
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "my-confluent-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://my-confluent-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              cpu: 1000m
+            requests:
+              cpu: 500m
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/resources/cpu-memory-deployment.yaml b/theodolite-quarkus/src/test/resources/cpu-memory-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..eaae989abb1f3b4fa44f032eee700181fb75e48e
--- /dev/null
+++ b/theodolite-quarkus/src/test/resources/cpu-memory-deployment.yaml
@@ -0,0 +1,58 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "my-confluent-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://my-confluent-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              memory: 4Gi
+              cpu: 1000m
+            requests:
+              memory: 2Gi
+              cpu: 500m
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/resources/k8s-resource-files/test-benchmark.yaml b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-benchmark.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e690aa56d74d695b0b81469023ccf82d0046cf45
--- /dev/null
+++ b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-benchmark.yaml
@@ -0,0 +1,38 @@
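+# Benchmark fixture; K8sResourceLoaderTest.loadBenchmarkTest expects the metadata name "example-benchmark".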
+apiVersion: theodolite.com/v1
+kind: benchmark
+metadata:
+  name: example-benchmark
+spec:
+  appResource:
+    - "uc1-kstreams-deployment.yaml"
+    - "aggregation-service.yaml"
+    - "jmx-configmap.yaml"
+    - "uc1-service-monitor.yaml"
+  loadGenResource:
+    - "uc1-load-generator-deployment.yaml"
+    - "uc1-load-generator-service.yaml"
+  resourceTypes:
+    - typeName: "Instances"
+      patchers:
+        - type: "ReplicaPatcher"
+          resource: "uc1-kstreams-deployment.yaml"
+  loadTypes:
+    - typeName: "NumSensors"
+      patchers:
+        - type: "EnvVarPatcher"
+          resource: "uc1-load-generator-deployment.yaml"
+          properties:
+            variableName: "NUM_SENSORS"
+            container: "workload-generator"
+        - type: "NumSensorsLoadGeneratorReplicaPatcher"
+          resource: "uc1-load-generator-deployment.yaml"
+          properties:
+            loadGenMaxRecords: "15000"
+  kafkaConfig:
+    bootstrapServer: "theodolite-cp-kafka:9092"
+    topics:
+      - name: "input"
+        numPartitions: 40
+        replicationFactor: 1
+      - name: "theodolite-.*"
+        removeOnly: True
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/resources/k8s-resource-files/test-configmap.yaml b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dce11c991749e538d856e664539e136e19a8ce6b
--- /dev/null
+++ b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-configmap.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: test-configmap
+data:
+  test: test
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/resources/k8s-resource-files/test-deployment.yaml b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e9c4bda12ce781dc85307ec393f821a5df04599e
--- /dev/null
+++ b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-deployment.yaml
@@ -0,0 +1,17 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: test-deployment
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+
diff --git a/theodolite-quarkus/src/test/resources/k8s-resource-files/test-execution-update.yaml b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-execution-update.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4ef4fdc007816bb492fbd90d6ddc516a2332cd5e
--- /dev/null
+++ b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-execution-update.yaml
@@ -0,0 +1,28 @@
+apiVersion: theodolite.com/v1
+kind: execution
+metadata:
+  name: example-execution
+spec:
+  name: test
+  benchmark: "uc1-kstreams-update"
+  load:
+    loadType: "NumSensors"
+    loadValues: [25000, 50000, 75000, 100000, 125000, 150000]
+  resources:
+    resourceType: "Instances"
+    resourceValues: [1, 2, 3, 4, 5]
+  slos:
+    - sloType: "lag trend"
+      threshold: 2000
+      prometheusUrl: "http://prometheus-operated:9090"
+      externalSloUrl: "http://localhost:80/evaluate-slope"
+      offset: 0
+      warmup: 60 # in seconds
+  execution:
+    strategy: "LinearSearch"
+    duration: 300 # in seconds
+    repetitions: 1
+    loadGenerationDelay: 30 # in seconds
+    restrictions:
+      - "LowerBound"
+  configOverrides: []
diff --git a/theodolite-quarkus/src/test/resources/k8s-resource-files/test-execution.yaml b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-execution.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4d6ade6ae32b064fd45b3fa508a936645538a543
--- /dev/null
+++ b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-execution.yaml
@@ -0,0 +1,28 @@
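+# Execution fixture; K8sResourceLoaderTest.loadExecutionTest expects the metadata name "example-execution".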
+apiVersion: theodolite.com/v1
+kind: execution
+metadata:
+  name: example-execution
+spec:
+  name: test
+  benchmark: "uc1-kstreams"
+  load:
+    loadType: "NumSensors"
+    loadValues: [25000, 50000, 75000, 100000, 125000, 150000]
+  resources:
+    resourceType: "Instances"
+    resourceValues: [1, 2, 3, 4, 5]
+  slos:
+    - sloType: "lag trend"
+      threshold: 2000
+      prometheusUrl: "http://prometheus-operated:9090"
+      externalSloUrl: "http://localhost:80/evaluate-slope"
+      offset: 0
+      warmup: 60 # in seconds
+  execution:
+    strategy: "LinearSearch"
+    duration: 300 # in seconds
+    repetitions: 1
+    loadGenerationDelay: 30 # in seconds
+    restrictions:
+      - "LowerBound"
+  configOverrides: []
diff --git a/theodolite-quarkus/src/test/resources/k8s-resource-files/test-service-monitor.yaml b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e8a0e52e15245e790adf2cbf84edb517754267be
--- /dev/null
+++ b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-service-monitor.yaml
@@ -0,0 +1,7 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: titan-ccp-aggregation
+    appScope: titan-ccp
+  name: test-service-monitor
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/resources/k8s-resource-files/test-service.yaml b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..471b6db83525b1afbe8cdac38c42399ecc33ef57
--- /dev/null
+++ b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-service.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: test-service
+  labels:
+    app: titan-ccp-aggregation
+spec:
+  selector:
+    app: titan-ccp-aggregation
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/resources/k8s-resource-files/test-statefulset.yaml b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-statefulset.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6a8810e9ee156ae1b055c1bef6ed4b29d1c41668
--- /dev/null
+++ b/theodolite-quarkus/src/test/resources/k8s-resource-files/test-statefulset.yaml
@@ -0,0 +1,16 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: test-statefulset
+spec:
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+        - name: nginx
+          image: k8s.gcr.io/nginx-slim:0.8
diff --git a/theodolite-quarkus/src/test/resources/memory-deployment.yaml b/theodolite-quarkus/src/test/resources/memory-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7af278b8c6b2efd13adbcc77e2db5a7b4c4478ad
--- /dev/null
+++ b/theodolite-quarkus/src/test/resources/memory-deployment.yaml
@@ -0,0 +1,56 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "my-confluent-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://my-confluent-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+          resources:
+            limits:
+              memory: 4Gi
+            requests:
+              memory: 2Gi
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/theodolite-quarkus/src/test/resources/no-resources-deployment.yaml b/theodolite-quarkus/src/test/resources/no-resources-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0687a3e042575951ec903492589101c122406f7f
--- /dev/null
+++ b/theodolite-quarkus/src/test/resources/no-resources-deployment.yaml
@@ -0,0 +1,51 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: uc-application
+          image: ghcr.io/cau-se/theodolite-uc1-kstreams-app:latest
+          ports:
+            - containerPort: 5555
+              name: jmx
+          env:
+            - name: KAFKA_BOOTSTRAP_SERVERS
+              value: "my-confluent-cp-kafka:9092"
+            - name: SCHEMA_REGISTRY_URL
+              value: "http://my-confluent-cp-schema-registry:8081"
+            - name: JAVA_OPTS
+              value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+            - name: COMMIT_INTERVAL_MS # Set as default for the applications
+              value: "100"
+        - name: prometheus-jmx-exporter
+          image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+          ports:
+            - containerPort: 5556
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-aggregation
+      volumes:
+        - name: jmx-config
+          configMap:
+            name: aggregation-jmx-configmap
\ No newline at end of file