From 9a660cf68bbd7a99e252f050c3cbccb1dff2b448 Mon Sep 17 00:00:00 2001
From: "stu126940@mail.uni-kiel.de" <stu126940@mail.uni-kiel.de>
Date: Tue, 26 May 2020 14:48:58 +0200
Subject: [PATCH 1/5] Add infrastructure for Kubernetes autoscaling

---
 .../horizontal-pod-autoscaler/custom-hpa.yaml | 25 +++++++++++++++++++
 .../resource-hpa.yaml                         | 20 +++++++++++++++
 .../infrastructure/metrics-server/values.yaml | 10 ++++++++
 .../prometheus/prometheus-adapter-values.yaml | 15 +++++++++++
 4 files changed, 70 insertions(+)
 create mode 100644 execution/infrastructure/horizontal-pod-autoscaler/custom-hpa.yaml
 create mode 100644 execution/infrastructure/horizontal-pod-autoscaler/resource-hpa.yaml
 create mode 100644 execution/infrastructure/metrics-server/values.yaml
 create mode 100644 execution/infrastructure/prometheus/prometheus-adapter-values.yaml

diff --git a/execution/infrastructure/horizontal-pod-autoscaler/custom-hpa.yaml b/execution/infrastructure/horizontal-pod-autoscaler/custom-hpa.yaml
new file mode 100644
index 000000000..21e20b7c0
--- /dev/null
+++ b/execution/infrastructure/horizontal-pod-autoscaler/custom-hpa.yaml
@@ -0,0 +1,25 @@
+apiVersion: autoscaling/v2beta2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: titan-ccp-aggregation
+  namespace: default
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: titan-ccp-aggregation
+  minReplicas: 1
+  maxReplicas: 6
+  metrics:
+    - type: Object
+      object:
+        metric:
+          name: kafka_consumergroup_group_lag
+          selector: {matchLabels: {topic: input}}
+        describedObject:
+          # apiVersion can be omitted for core API group objects such as Services
+          kind: Service
+          name: kafka-lag-exporter-service
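+        # a target of type Value compares the metric's absolute value against the
+        # target below; "1k" is Kubernetes quantity notation for 1000 (lag records)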
+        target:
+          type: Value
+          value: 1k
\ No newline at end of file
diff --git a/execution/infrastructure/horizontal-pod-autoscaler/resource-hpa.yaml b/execution/infrastructure/horizontal-pod-autoscaler/resource-hpa.yaml
new file mode 100644
index 000000000..20b12c996
--- /dev/null
+++ b/execution/infrastructure/horizontal-pod-autoscaler/resource-hpa.yaml
@@ -0,0 +1,20 @@
+apiVersion: autoscaling/v2beta2
+kind: HorizontalPodAutoscaler
+metadata:
+  name: titan-ccp-aggregation-scaling
+  namespace: default
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: titan-ccp-aggregation
+  minReplicas: 1
+  maxReplicas: 3
+  metrics:
+    - type: Resource
+      resource:
+        name: cpu
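+        # scale out when average CPU usage across pods exceeds 50% of the requested CPU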
+        target:
+          type: Utilization
+          averageUtilization: 50
+
diff --git a/execution/infrastructure/metrics-server/values.yaml b/execution/infrastructure/metrics-server/values.yaml
new file mode 100644
index 000000000..5bdb5eca6
--- /dev/null
+++ b/execution/infrastructure/metrics-server/values.yaml
@@ -0,0 +1,10 @@
+image:
+  repository: k8s.gcr.io/metrics-server-amd64
+  tag: v0.3.6
+  pullPolicy: IfNotPresent
+
+
+args: 
+# enable this if you have self-signed certificates, see: https://github.com/kubernetes-incubator/metrics-server
+- --kubelet-insecure-tls
+- --kubelet-preferred-address-types=InternalIP
\ No newline at end of file
diff --git a/execution/infrastructure/prometheus/prometheus-adapter-values.yaml b/execution/infrastructure/prometheus/prometheus-adapter-values.yaml
new file mode 100644
index 000000000..4ee8793a4
--- /dev/null
+++ b/execution/infrastructure/prometheus/prometheus-adapter-values.yaml
@@ -0,0 +1,15 @@
+prometheus:
+  url: http://prometheus-operated.default.svc
+  port: 9090
+
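+# expose the Kafka consumer-group lag collected by Prometheus as a custom metric
+# (assumes the kafka-lag-exporter metrics are already scraped by this Prometheus)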
+rules:
+  default: false
+  custom:
+  - seriesQuery: 'kafka_consumergroup_group_lag'
+    resources:
+      template: <<.Resource>>
+    name:
+      matches: ""
+      as: "kafka_consumergroup_group_lag"
+    metricsQuery: sum(kafka_consumergroup_group_lag{<<.LabelMatchers>>} > 0) by (<<.GroupBy>>)
-- 
GitLab


From a84473beafdb2808967bcdfd9a4812da6b4102b1 Mon Sep 17 00:00:00 2001
From: "stu126940@mail.uni-kiel.de" <stu126940@mail.uni-kiel.de>
Date: Tue, 26 May 2020 14:51:49 +0200
Subject: [PATCH 2/5] Make deployments configurable for autoscaling

---
 execution/uc1-application/aggregation-deployment.yaml | 10 ++++++++++
 execution/uc2-application/aggregation-deployment.yaml | 10 ++++++++++
 execution/uc3-application/aggregation-deployment.yaml | 10 ++++++++++
 execution/uc4-application/aggregation-deployment.yaml | 10 ++++++++++
 4 files changed, 40 insertions(+)

diff --git a/execution/uc1-application/aggregation-deployment.yaml b/execution/uc1-application/aggregation-deployment.yaml
index d5bccca4a..14e891916 100644
--- a/execution/uc1-application/aggregation-deployment.yaml
+++ b/execution/uc1-application/aggregation-deployment.yaml
@@ -30,6 +30,9 @@ spec:
           limits:
             memory: "{{MEMORY_LIMIT}}"
             cpu: "{{CPU_LIMIT}}"
+          requests:
+            memory: "{{MEMORY_REQUEST}}"
+            cpu: "{{CPU_REQUEST}}"
       - name: prometheus-jmx-exporter
         image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
         command:
@@ -42,6 +45,13 @@ spec:
           - jmx_prometheus_httpserver.jar
           - "5556"
           - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+        resources:
+          limits:
+            memory: "{{JMX_MEMORY_LIMIT}}"
+            cpu: "{{JMX_CPU_LIMIT}}"
+          requests:
+            memory: "{{JMX_MEMORY_REQUEST}}"
+            cpu: "{{JMX_CPU_REQUEST}}"
         ports:
           - containerPort: 5556
         volumeMounts:
diff --git a/execution/uc2-application/aggregation-deployment.yaml b/execution/uc2-application/aggregation-deployment.yaml
index ce5242173..9145543d4 100644
--- a/execution/uc2-application/aggregation-deployment.yaml
+++ b/execution/uc2-application/aggregation-deployment.yaml
@@ -30,6 +30,9 @@ spec:
           limits:
             memory: "{{MEMORY_LIMIT}}"
             cpu: "{{CPU_LIMIT}}"
+          requests:
+            memory: "{{MEMORY_REQUEST}}"
+            cpu: "{{CPU_REQUEST}}"
       - name: prometheus-jmx-exporter
         image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
         command:
@@ -42,6 +45,13 @@ spec:
           - jmx_prometheus_httpserver.jar
           - "5556"
           - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+        resources:
+          limits:
+            memory: "{{JMX_MEMORY_LIMIT}}"
+            cpu: "{{JMX_CPU_LIMIT}}"
+          requests:
+            memory: "{{JMX_MEMORY_REQUEST}}"
+            cpu: "{{JMX_CPU_REQUEST}}"
         ports:
           - containerPort: 5556
         volumeMounts:
diff --git a/execution/uc3-application/aggregation-deployment.yaml b/execution/uc3-application/aggregation-deployment.yaml
index 0f3327af3..dd0192e01 100644
--- a/execution/uc3-application/aggregation-deployment.yaml
+++ b/execution/uc3-application/aggregation-deployment.yaml
@@ -32,6 +32,9 @@ spec:
           limits:
             memory: "{{MEMORY_LIMIT}}"
             cpu: "{{CPU_LIMIT}}"
+          requests:
+            memory: "{{MEMORY_REQUEST}}"
+            cpu: "{{CPU_REQUEST}}"
       - name: prometheus-jmx-exporter
         image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
         command:
@@ -44,6 +47,13 @@ spec:
           - jmx_prometheus_httpserver.jar
           - "5556"
           - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+        resources:
+          limits:
+            memory: "{{JMX_MEMORY_LIMIT}}"
+            cpu: "{{JMX_CPU_LIMIT}}"
+          requests:
+            memory: "{{JMX_MEMORY_REQUEST}}"
+            cpu: "{{JMX_CPU_REQUEST}}"
         ports:
           - containerPort: 5556
         volumeMounts:
diff --git a/execution/uc4-application/aggregation-deployment.yaml b/execution/uc4-application/aggregation-deployment.yaml
index f7a750c79..790eac73f 100644
--- a/execution/uc4-application/aggregation-deployment.yaml
+++ b/execution/uc4-application/aggregation-deployment.yaml
@@ -34,6 +34,9 @@ spec:
           limits:
             memory: "{{MEMORY_LIMIT}}"
             cpu: "{{CPU_LIMIT}}"
+          requests:
+            memory: "{{MEMORY_REQUEST}}"
+            cpu: "{{CPU_REQUEST}}"
       - name: prometheus-jmx-exporter
         image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
         command:
@@ -46,6 +49,13 @@ spec:
           - jmx_prometheus_httpserver.jar
           - "5556"
           - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+        resources:
+          limits:
+            memory: "{{JMX_MEMORY_LIMIT}}"
+            cpu: "{{JMX_CPU_LIMIT}}"
+          requests:
+            memory: "{{JMX_MEMORY_REQUEST}}"
+            cpu: "{{JMX_CPU_REQUEST}}"
         ports:
           - containerPort: 5556
         volumeMounts:
-- 
GitLab


From f185cd570aac6472f90b750f8ebbfa4a2d3d3752 Mon Sep 17 00:00:00 2001
From: "stu126940@mail.uni-kiel.de" <stu126940@mail.uni-kiel.de>
Date: Tue, 26 May 2020 14:55:05 +0200
Subject: [PATCH 3/5] Add documentation for autoscaling

---
 execution/README.md           |  2 ++
 execution/elasticity-setup.md | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)
 create mode 100644 execution/elasticity-setup.md

diff --git a/execution/README.md b/execution/README.md
index f7df1a32a..49b184abb 100644
--- a/execution/README.md
+++ b/execution/README.md
@@ -106,6 +106,8 @@ To let Prometheus scrape Kafka lag metrics, deploy a ServiceMonitor:
 kubectl apply -f infrastructure/kafka-lag-exporter/service-monitor.yaml
 ```
 
+## Elasticity Benchmarking
+We also provide a configuration for enabling autoscaling of the benchmarked application; see [elasticity-setup](./elasticity-setup.md).
 
 ## Python 3.7
 
diff --git a/execution/elasticity-setup.md b/execution/elasticity-setup.md
new file mode 100644
index 000000000..c1064bd51
--- /dev/null
+++ b/execution/elasticity-setup.md
@@ -0,0 +1,33 @@
+# Elasticity Benchmark
+
+## Requirements
+
+### Metrics Server 
+Since Kubernetes 1.11, **metrics-server** replaces *Heapster* as the primary cluster-wide metrics aggregator. To provide *resource metrics* such as CPU and memory usage, you need to deploy a metrics-server instance:
+
+```sh
+helm install metrics-server stable/metrics-server -f infrastructure/metrics-server/values.yaml
+```
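+
+Once the metrics-server pod is running, the resource metrics API should answer; a quick check (the exact output depends on your cluster):
+
+```sh
+kubectl get apiservice v1beta1.metrics.k8s.io
+kubectl top pods
+```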
+
+### Prometheus Adapter 
+For custom metrics, such as the Kafka consumer lag, a Prometheus adapter is required. The adapter enables querying Prometheus metrics via the `custom.metrics.k8s.io` API. 
+
+```sh
+helm install prometheus-adapter stable/prometheus-adapter -f infrastructure/prometheus/prometheus-adapter-values.yaml
+```
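+
+After the adapter has synced its rules, the lag metric should be discoverable through the custom metrics API; a quick check (a sketch, assuming the `kafka-lag-exporter-service` from `infrastructure/horizontal-pod-autoscaler/custom-hpa.yaml`; `jq` is optional):
+
+```sh
+kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1" | jq .
+kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1/namespaces/default/services/kafka-lag-exporter-service/kafka_consumergroup_group_lag" | jq .
+```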
+
+### Horizontal Pod Autoscaler 
+We provide two configurations of the Horizontal Pod Autoscaler (`autoscaling/v2beta2`): one for CPU-based autoscaling and one for consumer-lag-based autoscaling.
+
+For CPU-based autoscaling:
+```sh
+kubectl apply -f infrastructure/horizontal-pod-autoscaler/resource-hpa.yaml
+```
+
+For consumer-lag-based autoscaling: 
+```sh
+kubectl apply -f infrastructure/horizontal-pod-autoscaler/custom-hpa.yaml
+```
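+
+While a benchmark is running, the autoscaler's decisions can be observed (use `titan-ccp-aggregation-scaling` for the resource-based variant):
+
+```sh
+kubectl get hpa --watch
+kubectl describe hpa titan-ccp-aggregation
+```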
+
+### Hints
+- Resource requests and limits must be set on the scaled containers; CPU-based autoscaling computes utilization relative to the requested resources (see the sketch below). 
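+
+A minimal sketch of the corresponding container resources, as set in the aggregation deployments (the `{{...}}` placeholders are substituted by the run scripts):
+
+```yaml
+resources:
+  limits:
+    memory: "{{MEMORY_LIMIT}}"
+    cpu: "{{CPU_LIMIT}}"
+  requests:
+    memory: "{{MEMORY_REQUEST}}"
+    cpu: "{{CPU_REQUEST}}"
+```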
-- 
GitLab


From f7cc617a810db66fd3adbd35b469db1242010063 Mon Sep 17 00:00:00 2001
From: "stu126940@mail.uni-kiel.de" <stu126940@mail.uni-kiel.de>
Date: Tue, 26 May 2020 14:55:20 +0200
Subject: [PATCH 4/5] Clean up whitespace in autoscaling documentation

---
 execution/elasticity-setup.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/execution/elasticity-setup.md b/execution/elasticity-setup.md
index c1064bd51..a941cca5e 100644
--- a/execution/elasticity-setup.md
+++ b/execution/elasticity-setup.md
@@ -2,21 +2,21 @@
 
 ## Requirements
 
-### Metrics Server 
+### Metrics Server
 Since Kubernetes 1.11, **metrics-server** replaces *Heapster* as the primary cluster-wide metrics aggregator. To provide *resource metrics* such as CPU and memory usage, you need to deploy a metrics-server instance:
 
 ```sh
 helm install metrics-server stable/metrics-server -f infrastructure/metrics-server/values.yaml
 ```
 
-### Prometheus Adapter 
+### Prometheus Adapter
 For custom metrics, such as the Kafka consumer lag, a Prometheus adapter is required. The adapter enables querying Prometheus metrics via the `custom.metrics.k8s.io` API. 
 
 ```sh
 helm install prometheus-adapter stable/prometheus-adapter -f infrastructure/prometheus/prometheus-adapter-values.yaml
 ```
 
-### Horizontal Pod Autoscaler 
+### Horizontal Pod Autoscaler
 We provide two configurations of the Horizontal Pod Autoscaler (`autoscaling/v2beta2`): one for CPU-based autoscaling and one for consumer-lag-based autoscaling.
 
 For CPU based autoscaling:
@@ -24,10 +24,10 @@ For CPU-based autoscaling:
 kubectl apply -f infrastructure/horizontal-pod-autoscaler/resource-hpa.yaml
 ```
 
-For consumer-lag-based autoscaling: 
+For consumer-lag-based autoscaling:
 ```sh
 kubectl apply -f infrastructure/horizontal-pod-autoscaler/custom-hpa.yaml
 ```
 
 ### Hints
-- Resource requests and limits must be set on the scaled containers; CPU-based autoscaling computes utilization relative to the requested resources (see the sketch below). 
+- Resource requests and limits must be set on the scaled containers; CPU-based autoscaling computes utilization relative to the requested resources (see the sketch below).
-- 
GitLab


From e2ed3e3d07ce083a464737fb1b5ec51d605b21ee Mon Sep 17 00:00:00 2001
From: "stu126940@mail.uni-kiel.de" <stu126940@mail.uni-kiel.de>
Date: Tue, 26 May 2020 14:58:55 +0200
Subject: [PATCH 5/5] Add configuration parameters for autoscaling

---
 execution/run_loop.sh    | 17 ++++++++++++++++-
 execution/run_uc1-new.sh | 12 +++++++++++-
 execution/run_uc2-new.sh |  9 ++++++++-
 execution/run_uc3-new.sh |  9 ++++++++-
 execution/run_uc4-new.sh |  9 ++++++++-
 5 files changed, 51 insertions(+), 5 deletions(-)

diff --git a/execution/run_loop.sh b/execution/run_loop.sh
index e63c0ecdf..9fabd65db 100755
--- a/execution/run_loop.sh
+++ b/execution/run_loop.sh
@@ -8,6 +8,15 @@ CPU_LIMIT=${5:-1000m}
 MEMORY_LIMIT=${6:-4Gi}
 KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
 EXECUTION_MINUTES=${8:-5}
+JMX_CPU_LIMIT=${9:-500m}
+JMX_MEMORY_LIMIT=${10:-2Gi}
+# request must be less than or equal to the corresponding limit
+CPU_REQUEST=${11:-200m}
+MEMORY_REQUEST=${12:-200Mi}
+JMX_CPU_REQUEST=${13:-50m}
+JMX_MEMORY_REQUEST=${14:-50Mi}
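+# example (hypothetical values for the new positions 9-14):
+#   ./run_loop.sh <args 1-8 as before> 500m 2Gi 200m 200Mi 50m 50Mi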
 
 # Get and increment counter
 EXP_ID=$(cat exp_counter.txt)
@@ -23,6 +32,12 @@ CPU_LIMIT=$CPU_LIMIT
 MEMORY_LIMIT=$MEMORY_LIMIT
 KAFKA_STREAMS_COMMIT_INTERVAL_MS=$KAFKA_STREAMS_COMMIT_INTERVAL_MS
 EXECUTION_MINUTES=$EXECUTION_MINUTES
+CPU_REQUEST=$CPU_REQUEST
+MEMORY_REQUEST=$MEMORY_REQUEST
+JMX_CPU_LIMIT=$JMX_CPU_LIMIT
+JMX_MEMORY_LIMIT=$JMX_MEMORY_LIMIT
+JMX_CPU_REQUEST=$JMX_CPU_REQUEST
+JMX_MEMORY_REQUEST=$JMX_MEMORY_REQUEST
 " >> "exp${EXP_ID}_uc${UC}_meta.txt"
 
 SUBEXPERIMENTS=$((${#DIM_VALUES[@]} * ${#REPLICAS[@]}))
@@ -35,7 +50,7 @@ do
     do
         SUBEXPERIMENT_COUNTER=$((SUBEXPERIMENT_COUNTER+1))
         echo "Run subexperiment $SUBEXPERIMENT_COUNTER/$SUBEXPERIMENTS with config: $DIM_VALUE $REPLICA"
-        ./run_uc$UC-new.sh $EXP_ID $DIM_VALUE $REPLICA $PARTITIONS $CPU_LIMIT $MEMORY_LIMIT $KAFKA_STREAMS_COMMIT_INTERVAL_MS $EXECUTION_MINUTES
+        ./run_uc$UC-new.sh $EXP_ID $DIM_VALUE $REPLICA $PARTITIONS $CPU_LIMIT $MEMORY_LIMIT $KAFKA_STREAMS_COMMIT_INTERVAL_MS $EXECUTION_MINUTES $JMX_CPU_LIMIT $JMX_MEMORY_LIMIT $CPU_REQUEST $MEMORY_REQUEST $JMX_CPU_REQUEST $JMX_MEMORY_REQUEST
         sleep 10s
     done
 done
diff --git a/execution/run_uc1-new.sh b/execution/run_uc1-new.sh
index 0edb75d00..0ad8438cb 100755
--- a/execution/run_uc1-new.sh
+++ b/execution/run_uc1-new.sh
@@ -8,6 +8,13 @@ CPU_LIMIT=${5:-1000m}
 MEMORY_LIMIT=${6:-4Gi}
 KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
 EXECUTION_MINUTES=${8:-5}
+JMX_CPU_LIMIT=${9:-500m}
+JMX_MEMORY_LIMIT=${10:-2Gi}
+# request must be less than or equal to the corresponding limit
+CPU_REQUEST=${11:-100m}
+MEMORY_REQUEST=${12:-100Mi}
+JMX_CPU_REQUEST=${13:-50m}
+JMX_MEMORY_REQUEST=${14:-50Mi}
 
 echo "EXP_ID: $EXP_ID"
 echo "DIM_VALUE: $DIM_VALUE"
@@ -29,13 +36,16 @@ NUM_SENSORS=$DIM_VALUE
 WL_MAX_RECORDS=150000
 WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
 
 WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml)
 echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
 
 # Start application
 REPLICAS=$INSTANCES
 #kubectl apply -f uc3-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc1-application/aggregation-deployment.yaml)
+APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{JMX_CPU_LIMIT}}/$JMX_CPU_LIMIT/g; s/{{JMX_MEMORY_LIMIT}}/$JMX_MEMORY_LIMIT/g; s/{{CPU_REQUEST}}/$CPU_REQUEST/g; s/{{MEMORY_REQUEST}}/$MEMORY_REQUEST/g; s/{{JMX_CPU_REQUEST}}/$JMX_CPU_REQUEST/g; s/{{JMX_MEMORY_REQUEST}}/$JMX_MEMORY_REQUEST/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc1-application/aggregation-deployment.yaml)
 echo "$APPLICATION_YAML" | kubectl apply -f -
 kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
 
diff --git a/execution/run_uc2-new.sh b/execution/run_uc2-new.sh
index 503c4ffa0..fdab7a0bd 100755
--- a/execution/run_uc2-new.sh
+++ b/execution/run_uc2-new.sh
@@ -8,6 +8,13 @@ CPU_LIMIT=${5:-1000m}
 MEMORY_LIMIT=${6:-4Gi}
 KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
 EXECUTION_MINUTES=${8:-5}
+JMX_CPU_LIMIT=${9:-500m}
+JMX_MEMORY_LIMIT=${10:-2Gi}
+# request must be less than or equal to the corresponding limit
+CPU_REQUEST=${11:-100m}
+MEMORY_REQUEST=${12:-100Mi}
+JMX_CPU_REQUEST=${13:-50m}
+JMX_MEMORY_REQUEST=${14:-50Mi}
 
 echo "EXP_ID: $EXP_ID"
 echo "DIM_VALUE: $DIM_VALUE"
@@ -31,7 +38,7 @@ sed "s/{{NUM_NESTED_GROUPS}}/$NUM_NESTED_GROUPS/g" uc2-workload-generator/deploy
 # Start application
 REPLICAS=$INSTANCES
 #kubectl apply -f uc2-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc2-application/aggregation-deployment.yaml)
+APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{JMX_CPU_LIMIT}}/$JMX_CPU_LIMIT/g; s/{{JMX_MEMORY_LIMIT}}/$JMX_MEMORY_LIMIT/g; s/{{CPU_REQUEST}}/$CPU_REQUEST/g; s/{{MEMORY_REQUEST}}/$MEMORY_REQUEST/g; s/{{JMX_CPU_REQUEST}}/$JMX_CPU_REQUEST/g; s/{{JMX_MEMORY_REQUEST}}/$JMX_MEMORY_REQUEST/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc2-application/aggregation-deployment.yaml)
 echo "$APPLICATION_YAML" | kubectl apply -f -
 kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
 
diff --git a/execution/run_uc3-new.sh b/execution/run_uc3-new.sh
index b8c7c20a1..31dd97044 100755
--- a/execution/run_uc3-new.sh
+++ b/execution/run_uc3-new.sh
@@ -8,6 +8,13 @@ CPU_LIMIT=${5:-1000m}
 MEMORY_LIMIT=${6:-4Gi}
 KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
 EXECUTION_MINUTES=${8:-5}
+JMX_CPU_LIMIT=${9:-500m}
+JMX_MEMORY_LIMIT=${10:-2Gi}
+# request must be less than or equal to the corresponding limit
+CPU_REQUEST=${11:-100m}
+MEMORY_REQUEST=${12:-100Mi}
+JMX_CPU_REQUEST=${13:-50m}
+JMX_MEMORY_REQUEST=${14:-50Mi}
 
 echo "EXP_ID: $EXP_ID"
 echo "DIM_VALUE: $DIM_VALUE"
@@ -35,7 +42,7 @@ echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
 # Start application
 REPLICAS=$INSTANCES
 #kubectl apply -f uc3-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc3-application/aggregation-deployment.yaml)
+APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{JMX_CPU_LIMIT}}/$JMX_CPU_LIMIT/g; s/{{JMX_MEMORY_LIMIT}}/$JMX_MEMORY_LIMIT/g; s/{{CPU_REQUEST}}/$CPU_REQUEST/g; s/{{MEMORY_REQUEST}}/$MEMORY_REQUEST/g; s/{{JMX_CPU_REQUEST}}/$JMX_CPU_REQUEST/g; s/{{JMX_MEMORY_REQUEST}}/$JMX_MEMORY_REQUEST/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc3-application/aggregation-deployment.yaml)
 echo "$APPLICATION_YAML" | kubectl apply -f -
 kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
 
diff --git a/execution/run_uc4-new.sh b/execution/run_uc4-new.sh
index ee3aaae98..ceb540002 100755
--- a/execution/run_uc4-new.sh
+++ b/execution/run_uc4-new.sh
@@ -8,6 +8,13 @@ CPU_LIMIT=${5:-1000m}
 MEMORY_LIMIT=${6:-4Gi}
 KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
 EXECUTION_MINUTES=${8:-5}
+JMX_CPU_LIMIT=${9:-500m}
+JMX_MEMORY_LIMIT=${10:-2Gi}
+# request must be less than or equal to the corresponding limit
+CPU_REQUEST=${11:-100m}
+MEMORY_REQUEST=${12:-100Mi}
+JMX_CPU_REQUEST=${13:-50m}
+JMX_MEMORY_REQUEST=${14:-50Mi}
 
 echo "EXP_ID: $EXP_ID"
 echo "DIM_VALUE: $DIM_VALUE"
@@ -34,7 +41,7 @@ REPLICAS=$INSTANCES
 #AGGREGATION_DURATION_DAYS=$DIM_VALUE
 #kubectl apply -f uc4-application/aggregation-deployment.yaml
 #sed "s/{{AGGREGATION_DURATION_DAYS}}/$AGGREGATION_DURATION_DAYS/g" uc4-application/aggregation-deployment.yaml | kubectl apply -f -
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc4-application/aggregation-deployment.yaml)
+APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{JMX_CPU_LIMIT}}/$JMX_CPU_LIMIT/g; s/{{JMX_MEMORY_LIMIT}}/$JMX_MEMORY_LIMIT/g; s/{{CPU_REQUEST}}/$CPU_REQUEST/g; s/{{MEMORY_REQUEST}}/$MEMORY_REQUEST/g; s/{{JMX_CPU_REQUEST}}/$JMX_CPU_REQUEST/g; s/{{JMX_MEMORY_REQUEST}}/$JMX_MEMORY_REQUEST/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc4-application/aggregation-deployment.yaml)
 echo "$APPLICATION_YAML" | kubectl apply -f -
 kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
 
-- 
GitLab