diff --git a/execution/README.md b/execution/README.md
index f7df1a32ad8b7898ba34b792095957a8e1362fc8..49b184abb59d08c046642b26f9622d5a41c3661a 100644
--- a/execution/README.md
+++ b/execution/README.md
@@ -106,6 +106,8 @@ To let Prometheus scrape Kafka lag metrics, deploy a ServiceMonitor:
 kubectl apply -f infrastructure/kafka-lag-exporter/service-monitor.yaml
 ```
 
+## Elasticity Benchmarking
+We also provide a configuration for autoscaling the benchmarked application; see [elasticity-setup](./elasticity-setup.md).
 
 ## Python 3.7
 
diff --git a/execution/elasticity-setup.md b/execution/elasticity-setup.md
new file mode 100644
index 0000000000000000000000000000000000000000..a941cca5e6b78f638f5b14415e4a006777f7dc2d
--- /dev/null
+++ b/execution/elasticity-setup.md
@@ -0,0 +1,51 @@
+# Elasticity Benchmark
+
+## Requirements
+
+### Metrics Server
+Since Kubernetes 1.11, **metrics-server** replaces *Heapster* as the primary cluster-wide metrics aggregator. To provide *resource metrics* such as CPU and memory usage, deploy a metrics-server instance:
+
+```sh
+helm install metrics-server stable/metrics-server -f infrastructure/metrics-server/values.yaml
+```
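+
+Once the metrics-server deployment is ready, resource metrics should be queryable; as a quick sanity check:
+
+```sh
+kubectl top pods
+```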
+
+### Prometheus Adapter
+For custom metrics, such as the Kafka consumer lag, a Prometheus adapter is required. The adapter makes Prometheus metrics queryable via the `custom.metrics.k8s.io` API:
+
+```sh
+helm install prometheus-adapter stable/prometheus-adapter -f infrastructure/prometheus/prometheus-adapter-values.yaml
+```
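+
+Once the adapter is running, the metrics it exposes can be listed through the aggregated API, e.g. to check that the consumer-lag metric is discoverable:
+
+```sh
+kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1"
+```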
+
+### Horizontal Pod Autoscaler
+We provide two configurations for the Horizontal Pod Autoscaler (API version `autoscaling/v2beta2`): one for CPU-based autoscaling and one for consumer-lag-based autoscaling.
+
+For CPU-based autoscaling:
+```sh
+kubectl apply -f infrastructure/horizontal-pod-autoscaler/resource-hpa.yaml
+```
+
+For consumer-lag-based autoscaling:
+```sh
+kubectl apply -f infrastructure/horizontal-pod-autoscaler/custom-hpa.yaml
+```
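+
+In both cases, the created autoscaler and its current and target metric values can be inspected with standard commands:
+
+```sh
+kubectl get hpa
+```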
+
+### Hints
+- Resource requests and limits must be set on the scaled containers (the `uc*-application` deployment templates already define them); CPU utilization targets are computed relative to the requested resources.
diff --git a/execution/infrastructure/horizontal-pod-autoscaler/custom-hpa.yaml b/execution/infrastructure/horizontal-pod-autoscaler/custom-hpa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..21e20b7c0142fff7023ebd441fb5be307bf0b1b0
--- /dev/null
+++ b/execution/infrastructure/horizontal-pod-autoscaler/custom-hpa.yaml
@@ -0,0 +1,25 @@
+apiVersion: autoscaling/v2beta2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: titan-ccp-aggregation
+  namespace: default
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: titan-ccp-aggregation
+  minReplicas: 1
+  maxReplicas: 6
+  metrics:
+    - type: Object
+      object:
+        metric:
+          name: kafka_consumergroup_group_lag
+          selector: {matchLabels: {topic: input}}
+        describedObject:
+          kind: Service
+          name: kafka-lag-exporter-service
+        target:
+          type: Value
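+          # "1k" is Kubernetes quantity notation for 1000, i.e. scale out once the consumer-group lag exceeds 1000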
+          value: 1k
\ No newline at end of file
diff --git a/execution/infrastructure/horizontal-pod-autoscaler/resource-hpa.yaml b/execution/infrastructure/horizontal-pod-autoscaler/resource-hpa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..20b12c9963f54f1d256fb731c37997567d9e89dc
--- /dev/null
+++ b/execution/infrastructure/horizontal-pod-autoscaler/resource-hpa.yaml
@@ -0,0 +1,21 @@
+apiVersion: autoscaling/v2beta2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: titan-ccp-aggregation-scaling
+  namespace: default
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: titan-ccp-aggregation
+  minReplicas: 1
+  maxReplicas: 3
+  metrics:
+    - type: Resource
+      resource:
+        name: cpu
+        target:
+          type: Utilization
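+          # utilization is measured relative to the containers' CPU requests, averaged across all pods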
+          averageUtilization: 50
+
diff --git a/execution/infrastructure/metrics-server/values.yaml b/execution/infrastructure/metrics-server/values.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5bdb5eca69b302acd3d31257938cc8ca024c971e
--- /dev/null
+++ b/execution/infrastructure/metrics-server/values.yaml
@@ -0,0 +1,9 @@
+image:
+  repository: k8s.gcr.io/metrics-server-amd64
+  tag: v0.3.6
+  pullPolicy: IfNotPresent
+
+args:
+# enable this if you have self-signed certificates, see: https://github.com/kubernetes-incubator/metrics-server
+- --kubelet-insecure-tls
+- --kubelet-preferred-address-types=InternalIP
\ No newline at end of file
diff --git a/execution/infrastructure/prometheus/prometheus-adapter-values.yaml b/execution/infrastructure/prometheus/prometheus-adapter-values.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4ee8793a4ad68eccced455cd52427c0436992769
--- /dev/null
+++ b/execution/infrastructure/prometheus/prometheus-adapter-values.yaml
@@ -0,0 +1,14 @@
+prometheus:
+  url: http://prometheus-operated.default.svc
+  port: 9090
+
+rules:
+  default: false
+  custom:
+  - seriesQuery: 'kafka_consumergroup_group_lag'
+    resources:
+      template: <<.Resource>>
+    name:
+      matches: ""
+      as: "kafka_consumergroup_group_lag"
+    metricsQuery: sum(kafka_consumergroup_group_lag{<<.LabelMatchers>>} > 0) by (<<.GroupBy>>)
diff --git a/execution/run_loop.sh b/execution/run_loop.sh
index e63c0ecdfc54d27456afd720cc66303bfb143b28..9fabd65dbe46cc77eeecf5dd5f12e212236e8237 100755
--- a/execution/run_loop.sh
+++ b/execution/run_loop.sh
@@ -8,6 +8,15 @@ CPU_LIMIT=${5:-1000m}
 MEMORY_LIMIT=${6:-4Gi}
 KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
 EXECUTION_MINUTES=${8:-5}
+JMX_CPU_LIMIT=${9:-500m}
+JMX_MEMORY_LIMIT=${10:-2Gi}
+# request must be less than or equal to the corresponding limit
+CPU_REQUEST=${11:-200m}
+MEMORY_REQUEST=${12:-200Mi}
+JMX_CPU_REQUEST=${13:-50m}
+JMX_MEMORY_REQUEST=${14:-50Mi}
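+# Parameters 9-14 are optional; to set them explicitly, append e.g.
+# "500m 2Gi 200m 200Mi 50m 50Mi" (the defaults above) to the usual invocation.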
 
 # Get and increment counter
 EXP_ID=$(cat exp_counter.txt)
@@ -23,6 +32,12 @@ CPU_LIMIT=$CPU_LIMIT
 MEMORY_LIMIT=$MEMORY_LIMIT
 KAFKA_STREAMS_COMMIT_INTERVAL_MS=$KAFKA_STREAMS_COMMIT_INTERVAL_MS
 EXECUTION_MINUTES=$EXECUTION_MINUTES
+CPU_REQUEST=$CPU_REQUEST
+MEMORY_REQUEST=$MEMORY_REQUEST
+JMX_CPU_LIMIT=$JMX_CPU_LIMIT
+JMX_MEMORY_LIMIT=$JMX_MEMORY_LIMIT
+JMX_CPU_REQUEST=$JMX_CPU_REQUEST
+JMX_MEMORY_REQUEST=$JMX_MEMORY_REQUEST
 " >> "exp${EXP_ID}_uc${UC}_meta.txt"
 
 SUBEXPERIMENTS=$((${#DIM_VALUES[@]} * ${#REPLICAS[@]}))
@@ -35,7 +50,7 @@ do
     do
         SUBEXPERIMENT_COUNTER=$((SUBEXPERIMENT_COUNTER+1))
         echo "Run subexperiment $SUBEXPERIMENT_COUNTER/$SUBEXPERIMENTS with config: $DIM_VALUE $REPLICA"
-        ./run_uc$UC-new.sh $EXP_ID $DIM_VALUE $REPLICA $PARTITIONS $CPU_LIMIT $MEMORY_LIMIT $KAFKA_STREAMS_COMMIT_INTERVAL_MS $EXECUTION_MINUTES
+        ./run_uc$UC-new.sh $EXP_ID $DIM_VALUE $REPLICA $PARTITIONS $CPU_LIMIT $MEMORY_LIMIT $KAFKA_STREAMS_COMMIT_INTERVAL_MS $EXECUTION_MINUTES $JMX_CPU_LIMIT $JMX_MEMORY_LIMIT $CPU_REQUEST $MEMORY_REQUEST $JMX_CPU_REQUEST $JMX_MEMORY_REQUEST
         sleep 10s
     done
 done
diff --git a/execution/run_uc1-new.sh b/execution/run_uc1-new.sh
index 0edb75d002861393ce9a4b1b59c21e5871c651eb..0ad8438cb8bf5779b01048248b29a2e86d2dfdb3 100755
--- a/execution/run_uc1-new.sh
+++ b/execution/run_uc1-new.sh
@@ -8,6 +8,13 @@ CPU_LIMIT=${5:-1000m}
 MEMORY_LIMIT=${6:-4Gi}
 KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
 EXECUTION_MINUTES=${8:-5}
+JMX_CPU_LIMIT=${9:-500m}
+JMX_MEMORY_LIMIT=${10:-2Gi}
+# request must be less than or equal to the corresponding limit
+CPU_REQUEST=${11:-100m}
+MEMORY_REQUEST=${12:-100Mi}
+JMX_CPU_REQUEST=${13:-50m}
+JMX_MEMORY_REQUEST=${14:-50Mi}
 
 echo "EXP_ID: $EXP_ID"
 echo "DIM_VALUE: $DIM_VALUE"
@@ -29,13 +36,13 @@ NUM_SENSORS=$DIM_VALUE
 WL_MAX_RECORDS=150000
 WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
 
 WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml)
 echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
 
 # Start application
 REPLICAS=$INSTANCES
 #kubectl apply -f uc3-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc1-application/aggregation-deployment.yaml)
+APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{JMX_CPU_LIMIT}}/$JMX_CPU_LIMIT/g; s/{{JMX_MEMORY_LIMIT}}/$JMX_MEMORY_LIMIT/g; s/{{CPU_REQUEST}}/$CPU_REQUEST/g; s/{{MEMORY_REQUEST}}/$MEMORY_REQUEST/g; s/{{JMX_CPU_REQUEST}}/$JMX_CPU_REQUEST/g; s/{{JMX_MEMORY_REQUEST}}/$JMX_MEMORY_REQUEST/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc1-application/aggregation-deployment.yaml)
 echo "$APPLICATION_YAML" | kubectl apply -f -
 kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
 
diff --git a/execution/run_uc2-new.sh b/execution/run_uc2-new.sh
index 503c4ffa0d1f7f4d785eb0fb1743fc305fc0732f..fdab7a0bd14b1f291ee5e7e07422c0343cdcf740 100755
--- a/execution/run_uc2-new.sh
+++ b/execution/run_uc2-new.sh
@@ -8,6 +8,13 @@ CPU_LIMIT=${5:-1000m}
 MEMORY_LIMIT=${6:-4Gi}
 KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
 EXECUTION_MINUTES=${8:-5}
+JMX_CPU_LIMIT=${9:-500m}
+JMX_MEMORY_LIMIT=${10:-2Gi}
+# request must be less than or equal to the corresponding limit
+CPU_REQUEST=${11:-100m}
+MEMORY_REQUEST=${12:-100Mi}
+JMX_CPU_REQUEST=${13:-50m}
+JMX_MEMORY_REQUEST=${14:-50Mi}
 
 echo "EXP_ID: $EXP_ID"
 echo "DIM_VALUE: $DIM_VALUE"
@@ -31,7 +38,7 @@ sed "s/{{NUM_NESTED_GROUPS}}/$NUM_NESTED_GROUPS/g" uc2-workload-generator/deploy
 # Start application
 REPLICAS=$INSTANCES
 #kubectl apply -f uc2-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc2-application/aggregation-deployment.yaml)
+APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{JMX_CPU_LIMIT}}/$JMX_CPU_LIMIT/g; s/{{JMX_MEMORY_LIMIT}}/$JMX_MEMORY_LIMIT/g; s/{{CPU_REQUEST}}/$CPU_REQUEST/g; s/{{MEMORY_REQUEST}}/$MEMORY_REQUEST/g; s/{{JMX_CPU_REQUEST}}/$JMX_CPU_REQUEST/g; s/{{JMX_MEMORY_REQUEST}}/$JMX_MEMORY_REQUEST/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc2-application/aggregation-deployment.yaml)
 echo "$APPLICATION_YAML" | kubectl apply -f -
 kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
 
diff --git a/execution/run_uc3-new.sh b/execution/run_uc3-new.sh
index b8c7c20a1600ecf9c78ef3743ae46fb47ae04c17..31dd9704441ef52aa359c0944fbd68982c6a2f88 100755
--- a/execution/run_uc3-new.sh
+++ b/execution/run_uc3-new.sh
@@ -8,6 +8,13 @@ CPU_LIMIT=${5:-1000m}
 MEMORY_LIMIT=${6:-4Gi}
 KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
 EXECUTION_MINUTES=${8:-5}
+JMX_CPU_LIMIT=${9:-500m}
+JMX_MEMORY_LIMIT=${10:-2Gi}
+# request must be less than or equal to the corresponding limit
+CPU_REQUEST=${11:-100m}
+MEMORY_REQUEST=${12:-100Mi}
+JMX_CPU_REQUEST=${13:-50m}
+JMX_MEMORY_REQUEST=${14:-50Mi}
 
 echo "EXP_ID: $EXP_ID"
 echo "DIM_VALUE: $DIM_VALUE"
@@ -35,7 +42,7 @@ echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
 # Start application
 REPLICAS=$INSTANCES
 #kubectl apply -f uc3-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc3-application/aggregation-deployment.yaml)
+APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{JMX_CPU_LIMIT}}/$JMX_CPU_LIMIT/g; s/{{JMX_MEMORY_LIMIT}}/$JMX_MEMORY_LIMIT/g; s/{{CPU_REQUEST}}/$CPU_REQUEST/g; s/{{MEMORY_REQUEST}}/$MEMORY_REQUEST/g; s/{{JMX_CPU_REQUEST}}/$JMX_CPU_REQUEST/g; s/{{JMX_MEMORY_REQUEST}}/$JMX_MEMORY_REQUEST/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc3-application/aggregation-deployment.yaml)
 echo "$APPLICATION_YAML" | kubectl apply -f -
 kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
 
diff --git a/execution/run_uc4-new.sh b/execution/run_uc4-new.sh
index ee3aaae98f151ef22088608ee970d3f8d66989e1..ceb540002aea3b18a331997c4e362f325a9bceb7 100755
--- a/execution/run_uc4-new.sh
+++ b/execution/run_uc4-new.sh
@@ -8,6 +8,13 @@ CPU_LIMIT=${5:-1000m}
 MEMORY_LIMIT=${6:-4Gi}
 KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
 EXECUTION_MINUTES=${8:-5}
+JMX_CPU_LIMIT=${9:-500m}
+JMX_MEMORY_LIMIT=${10:-2Gi}
+# request must be less than or equal to the corresponding limit
+CPU_REQUEST=${11:-100m}
+MEMORY_REQUEST=${12:-100Mi}
+JMX_CPU_REQUEST=${13:-50m}
+JMX_MEMORY_REQUEST=${14:-50Mi}
 
 echo "EXP_ID: $EXP_ID"
 echo "DIM_VALUE: $DIM_VALUE"
@@ -34,7 +41,7 @@ REPLICAS=$INSTANCES
 #AGGREGATION_DURATION_DAYS=$DIM_VALUE
 #kubectl apply -f uc4-application/aggregation-deployment.yaml
 #sed "s/{{AGGREGATION_DURATION_DAYS}}/$AGGREGATION_DURATION_DAYS/g" uc4-application/aggregation-deployment.yaml | kubectl apply -f -
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc4-application/aggregation-deployment.yaml)
+APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{JMX_CPU_LIMIT}}/$JMX_CPU_LIMIT/g; s/{{JMX_MEMORY_LIMIT}}/$JMX_MEMORY_LIMIT/g; s/{{CPU_REQUEST}}/$CPU_REQUEST/g; s/{{MEMORY_REQUEST}}/$MEMORY_REQUEST/g; s/{{JMX_CPU_REQUEST}}/$JMX_CPU_REQUEST/g; s/{{JMX_MEMORY_REQUEST}}/$JMX_MEMORY_REQUEST/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc4-application/aggregation-deployment.yaml)
 echo "$APPLICATION_YAML" | kubectl apply -f -
 kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
 
diff --git a/execution/uc1-application/aggregation-deployment.yaml b/execution/uc1-application/aggregation-deployment.yaml
index d5bccca4a72f6a47a855ed8a7ca47fac4a8a19ca..14e8919161c0c87097fd2e099a6f512c528cddce 100644
--- a/execution/uc1-application/aggregation-deployment.yaml
+++ b/execution/uc1-application/aggregation-deployment.yaml
@@ -30,6 +30,9 @@ spec:
           limits:
             memory: "{{MEMORY_LIMIT}}"
             cpu: "{{CPU_LIMIT}}"
+          requests:
+            memory: "{{MEMORY_REQUEST}}"
+            cpu: "{{CPU_REQUEST}}"
       - name: prometheus-jmx-exporter
         image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
         command:
@@ -42,6 +45,13 @@ spec:
           - jmx_prometheus_httpserver.jar
           - "5556"
           - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+        resources:
+          limits:
+            memory: "{{JMX_MEMORY_LIMIT}}"
+            cpu: "{{JMX_CPU_LIMIT}}"
+          requests:
+            memory: "{{JMX_MEMORY_REQUEST}}"
+            cpu: "{{JMX_CPU_REQUEST}}"
         ports:
           - containerPort: 5556
         volumeMounts:
diff --git a/execution/uc2-application/aggregation-deployment.yaml b/execution/uc2-application/aggregation-deployment.yaml
index ce52421731ea5fc044c435ad10adb311e7e7e878..9145543d46f3b2bce9f071b97cfea9fa253cc0b0 100644
--- a/execution/uc2-application/aggregation-deployment.yaml
+++ b/execution/uc2-application/aggregation-deployment.yaml
@@ -30,6 +30,9 @@ spec:
           limits:
             memory: "{{MEMORY_LIMIT}}"
             cpu: "{{CPU_LIMIT}}"
+          requests:
+            memory: "{{MEMORY_REQUEST}}"
+            cpu: "{{CPU_REQUEST}}"
       - name: prometheus-jmx-exporter
         image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
         command:
@@ -42,6 +45,13 @@ spec:
           - jmx_prometheus_httpserver.jar
           - "5556"
           - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+        resources:
+          limits:
+            memory: "{{JMX_MEMORY_LIMIT}}"
+            cpu: "{{JMX_CPU_LIMIT}}"
+          requests:
+            memory: "{{JMX_MEMORY_REQUEST}}"
+            cpu: "{{JMX_CPU_REQUEST}}"
         ports:
           - containerPort: 5556
         volumeMounts:
diff --git a/execution/uc3-application/aggregation-deployment.yaml b/execution/uc3-application/aggregation-deployment.yaml
index 0f3327af3119df125e3431574e3e406183abc132..dd0192e0127900fc3714fd160aa6088c141ac3bb 100644
--- a/execution/uc3-application/aggregation-deployment.yaml
+++ b/execution/uc3-application/aggregation-deployment.yaml
@@ -32,6 +32,9 @@ spec:
           limits:
             memory: "{{MEMORY_LIMIT}}"
             cpu: "{{CPU_LIMIT}}"
+          requests:
+            memory: "{{MEMORY_REQUEST}}"
+            cpu: "{{CPU_REQUEST}}"
       - name: prometheus-jmx-exporter
         image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
         command:
@@ -44,6 +47,13 @@ spec:
           - jmx_prometheus_httpserver.jar
           - "5556"
           - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+        resources:
+          limits:
+            memory: "{{JMX_MEMORY_LIMIT}}"
+            cpu: "{{JMX_CPU_LIMIT}}"
+          requests:
+            memory: "{{JMX_MEMORY_REQUEST}}"
+            cpu: "{{JMX_CPU_REQUEST}}"
         ports:
           - containerPort: 5556
         volumeMounts:
diff --git a/execution/uc4-application/aggregation-deployment.yaml b/execution/uc4-application/aggregation-deployment.yaml
index f7a750c790b6a9eab8453fa91e05176de665104e..790eac73f784f68deee5f7579c1edc4cd8211bac 100644
--- a/execution/uc4-application/aggregation-deployment.yaml
+++ b/execution/uc4-application/aggregation-deployment.yaml
@@ -34,6 +34,9 @@ spec:
           limits:
             memory: "{{MEMORY_LIMIT}}"
             cpu: "{{CPU_LIMIT}}"
+          requests:
+            memory: "{{MEMORY_REQUEST}}"
+            cpu: "{{CPU_REQUEST}}"
       - name: prometheus-jmx-exporter
         image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
         command:
@@ -46,6 +49,13 @@ spec:
           - jmx_prometheus_httpserver.jar
           - "5556"
           - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+        resources:
+          limits:
+            memory: "{{JMX_MEMORY_LIMIT}}"
+            cpu: "{{JMX_CPU_LIMIT}}"
+          requests:
+            memory: "{{JMX_MEMORY_REQUEST}}"
+            cpu: "{{JMX_CPU_REQUEST}}"
         ports:
           - containerPort: 5556
         volumeMounts: