diff --git a/execution/README.md b/execution/README.md index 2ad12f24c1d252194c4e58ec8994548496a09d8c..6c6203ad983549bf0ed2fdb040f4165dc36bd6bd 100644 --- a/execution/README.md +++ b/execution/README.md @@ -170,18 +170,47 @@ access (e.g. via SSH) to one of your cluster nodes. You first need to create a directory on a selected node where all benchmark results should be stored. Next, modify `infrastructure/kubernetes/volume-local.yaml` by setting `<node-name>` to your selected node. (This node will most likely also execute the [Theodolite job](#Execution).) Further, you have to set `path` to the directory on the node you just created. To deploy -you volume run: +your volume run: ```sh kubectl apply -f infrastructure/kubernetes/volume-local.yaml ``` +##### *Oracle Cloud Infrastructure* volume + +When you are running in the Oracle Cloud, you can provision a persistent volume claim by attaching a volume from the +Oracle Cloud Infrastructure Block Volume service. To create your volume, run: + +```sh +kubectl apply -f infrastructure/kubernetes/volume-oci.yaml +``` + +More information can be found in the official documentation: +[Oracle Cloud Infrastructure: Creating a Persistent Volume Claim](https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengcreatingpersistentvolumeclaim.htm) + ##### Other volumes -To use volumes provided by public cloud providers or network-based file systems, you can use the definitions in +To use volumes provided by other public cloud providers or network-based file systems, you can use the definitions in `infrastructure/kubernetes/` as a starting point. See the offical [volumes documentation](https://kubernetes.io/docs/concepts/storage/volumes/) for additional information. +##### Accessing benchmark results via Kubernetes + +In cases where you do not have direct access to the underlying storage infrastructure of your volume (e.g., if your +admin configures a local or hostPath volume for you and you do not have SSH access to the node), you can deploy our +Theodolite results access deployment: + +```sh +kubectl apply -f infrastructure/kubernetes/volume-access.yaml +``` + +It allows you to browse the benchmark results or copy files to your Kubernetes client via the following commands: + +```sh +kubectl exec -it $(kubectl get pods -o=name -l app=theodolite-results-access) -- sh +kubectl cp $(kubectl get pods --no-headers -o custom-columns=":metadata.name" -l app=theodolite-results-access):app/results <target-dir> +``` + ## Execution diff --git a/execution/infrastructure/kubernetes/volume-access.yaml b/execution/infrastructure/kubernetes/volume-access.yaml new file mode 100644 index 0000000000000000000000000000000000000000..54c996160726504b0965af791c74cff11a860c8e --- /dev/null +++ b/execution/infrastructure/kubernetes/volume-access.yaml @@ -0,0 +1,30 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: theodolite-results-access + labels: + app: theodolite-results-access +spec: + replicas: 1 + selector: + matchLabels: + app: theodolite-results-access + template: + metadata: + labels: + app: theodolite-results-access + spec: + containers: + - name: theodolite-results-access + image: busybox:latest + command: + - sh + - -c + - exec tail -f /dev/null + volumeMounts: + - mountPath: /app/results + name: theodolite-pv-storage + volumes: + - name: theodolite-pv-storage + persistentVolumeClaim: + claimName: theodolite-pv-claim diff --git a/execution/infrastructure/kubernetes/volume-oci-access.yaml b/execution/infrastructure/kubernetes/volume-oci-access.yaml new file
mode 100644 index 0000000000000000000000000000000000000000..7c129600f8c6168f06ddcf2865ff29bc4e3c942c --- /dev/null +++ b/execution/infrastructure/kubernetes/volume-oci-access.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: theodolite-results-access +spec: + restartPolicy: Always + containers: + - name: theodolite-results-access + image: busybox:latest + command: + - sh + - -c + - exec tail -f /dev/null + volumeMounts: + - mountPath: /app/results + name: theodolite-pv-storage + volumes: + - name: theodolite-pv-storage + persistentVolumeClaim: + claimName: theodolite-pv-claim diff --git a/execution/infrastructure/kubernetes/volume-oci.yaml b/execution/infrastructure/kubernetes/volume-oci.yaml new file mode 100644 index 0000000000000000000000000000000000000000..39d267011661b56021f7e716d860ab427608ed05 --- /dev/null +++ b/execution/infrastructure/kubernetes/volume-oci.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: theodolite-pv-claim +spec: + storageClassName: "oci-bv" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi diff --git a/execution/run_uc.py b/execution/run_uc.py index a0fcdbb6d57e5dc67d18e69b7d07fcdbfa809307..9bbb2876447438c1c3ac676091b11f6baa990622 100644 --- a/execution/run_uc.py +++ b/execution/run_uc.py @@ -94,11 +94,11 @@ def load_yaml_files(): :return: wg, app_svc, app_svc_monitor ,app_jmx, app_deploy """ print('Load kubernetes yaml files') - wg = load_yaml('uc-workload-generator/base/workloadGenerator.yaml') - app_svc = load_yaml('uc-application/base/aggregation-service.yaml') - app_svc_monitor = load_yaml('uc-application/base/service-monitor.yaml') - app_jmx = load_yaml('uc-application/base/jmx-configmap.yaml') - app_deploy = load_yaml('uc-application/base/aggregation-deployment.yaml') + wg = load_yaml('uc-workload-generator/workloadGenerator.yaml') + app_svc = load_yaml('uc-application/aggregation-service.yaml') + app_svc_monitor = load_yaml('uc-application/service-monitor.yaml') + app_jmx = load_yaml('uc-application/jmx-configmap.yaml') + app_deploy = load_yaml('uc-application/aggregation-deployment.yaml') print('Kubernetes yaml files loaded') return wg, app_svc, app_svc_monitor, app_jmx, app_deploy diff --git a/execution/run_uc1.sh b/execution/run_uc1.sh deleted file mode 100755 index 02c46d8832fc800c57453570b14a6bf02681326a..0000000000000000000000000000000000000000 --- a/execution/run_uc1.sh +++ /dev/null @@ -1,124 +0,0 @@ -#!/bin/bash - -EXP_ID=$1 -DIM_VALUE=$2 -INSTANCES=$3 -PARTITIONS=${4:-40} -CPU_LIMIT=${5:-1000m} -MEMORY_LIMIT=${6:-4Gi} -KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100} -EXECUTION_MINUTES=${8:-5} - -echo "EXP_ID: $EXP_ID" -echo "DIM_VALUE: $DIM_VALUE" -echo "INSTANCES: $INSTANCES" -echo "PARTITIONS: $PARTITIONS" -echo "CPU_LIMIT: $CPU_LIMIT" -echo "MEMORY_LIMIT: $MEMORY_LIMIT" -echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS" -echo "EXECUTION_MINUTES: $EXECUTION_MINUTES" - -# Create Topics -#PARTITIONS=40 -#kubectl run temp-kafka --rm --attach --restart=Never --image=solsson/kafka --command -- bash -c "./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" -PARTITIONS=$PARTITIONS -kubectl exec 
kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" - -# Start workload generator -NUM_SENSORS=$DIM_VALUE -WL_MAX_RECORDS=150000 -WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS)) - -cat <<EOF >uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: $WL_INSTANCES - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "$NUM_SENSORS" - - name: INSTANCES - value: "$WL_INSTANCES" -EOF -kubectl apply -k uc-workload-generator/overlay/uc1-workload-generator - -# Start application -REPLICAS=$INSTANCES -cat <<EOF >uc-application/overlay/uc1-application/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: $REPLICAS - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS" - resources: - limits: - memory: $MEMORY_LIMIT - cpu: $CPU_LIMIT -EOF -kubectl apply -k uc-application/overlay/uc1-application - -# Execute for certain time -sleep $(($EXECUTION_MINUTES * 60)) - -# Run eval script -source ../.venv/bin/activate -python lag_analysis.py $EXP_ID uc1 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES -deactivate - -# Stop workload generator and app -kubectl delete -k uc-workload-generator/overlay/uc1-workload-generator -kubectl delete -k uc-application/overlay/uc1-application - - -# Delete topics instead of Kafka -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -# kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic '.*' -#sleep 30s # TODO check -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' | wc -l -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" - -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -echo "Finished execution, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' -while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0 -do - kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*' --if-exists" - echo "Wait for topic deletion" - sleep 5s - #echo "Finished waiting, print topics:" - #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper 
my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - # Sometimes a second deletion seems to be required -done -echo "Finish topic deletion, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - -# delete zookeeper nodes used for workload generation -echo "Delete ZooKeeper configurations used for workload generation" -kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation" -echo "Waiting for deletion" -while kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 get /workload-generation" -do - echo "Wait for ZooKeeper state deletion." - sleep 5s -done -echo "Deletion finished" - -echo "Exiting script" - -KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}") -kubectl delete pod $KAFKA_LAG_EXPORTER_POD diff --git a/execution/run_uc2.sh b/execution/run_uc2.sh deleted file mode 100755 index 4544d3609ed807141455378b92ce3536ea2f92f6..0000000000000000000000000000000000000000 --- a/execution/run_uc2.sh +++ /dev/null @@ -1,129 +0,0 @@ -#!/bin/bash - -EXP_ID=$1 -DIM_VALUE=$2 -INSTANCES=$3 -PARTITIONS=${4:-40} -CPU_LIMIT=${5:-1000m} -MEMORY_LIMIT=${6:-4Gi} -KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100} -EXECUTION_MINUTES=${8:-5} - -echo "EXP_ID: $EXP_ID" -echo "DIM_VALUE: $DIM_VALUE" -echo "INSTANCES: $INSTANCES" -echo "PARTITIONS: $PARTITIONS" -echo "CPU_LIMIT: $CPU_LIMIT" -echo "MEMORY_LIMIT: $MEMORY_LIMIT" -echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS" -echo "EXECUTION_MINUTES: $EXECUTION_MINUTES" - -# Create Topics -#PARTITIONS=40 -#kubectl run temp-kafka --rm --attach --restart=Never --image=solsson/kafka --command -- bash -c "./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" -PARTITIONS=$PARTITIONS -kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic aggregation-feedback --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" - -# Start workload generator -NUM_NESTED_GROUPS=$DIM_VALUE -WL_MAX_RECORDS=150000 -APPROX_NUM_SENSORS=$((4**NUM_NESTED_GROUPS)) -WL_INSTANCES=$(((APPROX_NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS)) - -cat <<EOF >uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: $WL_INSTANCES - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "4" - - name: HIERARCHY - value: "full" - - name: NUM_NESTED_GROUPS - value: 
"$NUM_NESTED_GROUPS" - - name: INSTANCES - value: "$WL_INSTANCES" -EOF -kubectl apply -k uc-workload-generator/overlay/uc2-workload-generator - -# Start application -REPLICAS=$INSTANCES -cat <<EOF >uc-application/overlay/uc2-application/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: $REPLICAS - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS" - resources: - limits: - memory: $MEMORY_LIMIT - cpu: $CPU_LIMIT -EOF -kubectl apply -k uc-application/overlay/uc2-application - -# Execute for certain time -sleep $(($EXECUTION_MINUTES * 60)) - -# Run eval script -source ../.venv/bin/activate -python lag_analysis.py $EXP_ID uc2 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES -deactivate - -# Stop workload generator and app -kubectl delete -k uc-workload-generator/overlay/uc2-workload-generator -kubectl delete -k uc-application/overlay/uc2-application - - -# Delete topics instead of Kafka -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -# kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic '.*' -#sleep 30s # TODO check -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' | wc -l -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" - -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -echo "Finished execution, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' -while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|aggregation-feedback|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0 -do - kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|aggregation-feedback|output|configuration|theodolite-.*' --if-exists" - echo "Wait for topic deletion" - sleep 5s - #echo "Finished waiting, print topics:" - #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - # Sometimes a second deletion seems to be required -done -echo "Finish topic deletion, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - -# delete zookeeper nodes used for workload generation -echo "Delete ZooKeeper configurations used for workload generation" -kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation" -echo "Waiting for deletion" -while kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 get /workload-generation" -do - echo "Wait for 
ZooKeeper state deletion." - sleep 5s -done -echo "Deletion finished" - -echo "Exiting script" - -KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}") -kubectl delete pod $KAFKA_LAG_EXPORTER_POD diff --git a/execution/run_uc3.sh b/execution/run_uc3.sh deleted file mode 100755 index 4f2323f937f19d01a73482dea6aeaf5e922a0a3f..0000000000000000000000000000000000000000 --- a/execution/run_uc3.sh +++ /dev/null @@ -1,125 +0,0 @@ -#!/bin/bash - -EXP_ID=$1 -DIM_VALUE=$2 -INSTANCES=$3 -PARTITIONS=${4:-40} -CPU_LIMIT=${5:-1000m} -MEMORY_LIMIT=${6:-4Gi} -KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100} -EXECUTION_MINUTES=${8:-5} - -echo "EXP_ID: $EXP_ID" -echo "DIM_VALUE: $DIM_VALUE" -echo "INSTANCES: $INSTANCES" -echo "PARTITIONS: $PARTITIONS" -echo "CPU_LIMIT: $CPU_LIMIT" -echo "MEMORY_LIMIT: $MEMORY_LIMIT" -echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS" -echo "EXECUTION_MINUTES: $EXECUTION_MINUTES" - -# Create Topics -#PARTITIONS=40 -#kubectl run temp-kafka --rm --attach --restart=Never --image=solsson/kafka --command -- bash -c "./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" -PARTITIONS=$PARTITIONS -kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" - -# Start workload generator -NUM_SENSORS=$DIM_VALUE -WL_MAX_RECORDS=150000 -WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS)) - -cat <<EOF >uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: $WL_INSTANCES - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "$NUM_SENSORS" - - name: INSTANCES - value: "$WL_INSTANCES" -EOF -kubectl apply -k uc-workload-generator/overlay/uc3-workload-generator - - -# Start application -REPLICAS=$INSTANCES -cat <<EOF >uc-application/overlay/uc3-application/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: $REPLICAS - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS" - resources: - limits: - memory: $MEMORY_LIMIT - cpu: $CPU_LIMIT -EOF -kubectl apply -k uc-application/overlay/uc3-application -kubectl scale deployment uc3-titan-ccp-aggregation --replicas=$REPLICAS - -# Execute for certain time -sleep $(($EXECUTION_MINUTES * 60)) - -# Run eval script -source ../.venv/bin/activate -python lag_analysis.py $EXP_ID uc3 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES -deactivate - -# Stop workload generator and app -kubectl delete -k uc-workload-generator/overlay/uc3-workload-generator -kubectl delete -k uc-application/overlay/uc3-application - -# Delete topics instead of Kafka 
-#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -# kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic '.*' -#sleep 30s # TODO check -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' | wc -l -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" - -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -echo "Finished execution, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' -while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0 -do - kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*' --if-exists" - echo "Wait for topic deletion" - sleep 5s - #echo "Finished waiting, print topics:" - #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - # Sometimes a second deletion seems to be required -done -echo "Finish topic deletion, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - -# delete zookeeper nodes used for workload generation -echo "Delete ZooKeeper configurations used for workload generation" -kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation" -echo "Waiting for deletion" -while kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 get /workload-generation" -do - echo "Wait for ZooKeeper state deletion." 
- sleep 5s -done -echo "Deletion finished" - -echo "Exiting script" - -KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}") -kubectl delete pod $KAFKA_LAG_EXPORTER_POD diff --git a/execution/run_uc4.sh b/execution/run_uc4.sh deleted file mode 100755 index 08a38498839ef3c50a39c1ccfbd26914993ffbd3..0000000000000000000000000000000000000000 --- a/execution/run_uc4.sh +++ /dev/null @@ -1,124 +0,0 @@ -#!/bin/bash - -EXP_ID=$1 -DIM_VALUE=$2 -INSTANCES=$3 -PARTITIONS=${4:-40} -CPU_LIMIT=${5:-1000m} -MEMORY_LIMIT=${6:-4Gi} -KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100} -EXECUTION_MINUTES=${8:-5} - -echo "EXP_ID: $EXP_ID" -echo "DIM_VALUE: $DIM_VALUE" -echo "INSTANCES: $INSTANCES" -echo "PARTITIONS: $PARTITIONS" -echo "CPU_LIMIT: $CPU_LIMIT" -echo "MEMORY_LIMIT: $MEMORY_LIMIT" -echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS" -echo "EXECUTION_MINUTES: $EXECUTION_MINUTES" - -# Create Topics -#PARTITIONS=40 -#kubectl run temp-kafka --rm --attach --restart=Never --image=solsson/kafka --command -- bash -c "./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" -PARTITIONS=$PARTITIONS -kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" - -# Start workload generator -NUM_SENSORS=$DIM_VALUE -WL_MAX_RECORDS=150000 -WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS)) - -cat <<EOF >uuc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: $WL_INSTANCES - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "$NUM_SENSORS" - - name: INSTANCES - value: "$WL_INSTANCES" -EOF -kubectl apply -k uc-workload-generator/overlay/uc4-workload-generator - -# Start application -REPLICAS=$INSTANCES -cat <<EOF >uc-application/overlay/uc4-application/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: $REPLICAS - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS" - resources: - limits: - memory: $MEMORY_LIMIT - cpu: $CPU_LIMIT -EOF -kubectl apply -k uc-application/overlay/uc4-application -kubectl scale deployment uc4-titan-ccp-aggregation --replicas=$REPLICAS - -# Execute for certain time -sleep $(($EXECUTION_MINUTES * 60)) - -# Run eval script -source ../.venv/bin/activate -python lag_analysis.py $EXP_ID uc4 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES -deactivate - -# Stop workload generator and app -kubectl delete -k uc-workload-generator/overlay/uc4-workload-generator -kubectl delete -k uc-application/overlay/uc4-application - -# Delete topics instead of Kafka -#kubectl exec kafka-client -- 
bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -# kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic '.*' -#sleep 30s # TODO check -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' | wc -l -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" - -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -echo "Finished execution, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' -while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0 -do - kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*' --if-exists" - echo "Wait for topic deletion" - sleep 5s - #echo "Finished waiting, print topics:" - #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - # Sometimes a second deletion seems to be required -done -echo "Finish topic deletion, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - -# delete zookeeper nodes used for workload generation -echo "Delete ZooKeeper configurations used for workload generation" -kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation" -echo "Waiting for deletion" -while kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 get /workload-generation" -do - echo "Wait for ZooKeeper state deletion." 
- sleep 5s -done -echo "Deletion finished" - -echo "Exiting script" - -KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}") -kubectl delete pod $KAFKA_LAG_EXPORTER_POD diff --git a/execution/uc-application/base/aggregation-deployment.yaml b/execution/uc-application/aggregation-deployment.yaml similarity index 100% rename from execution/uc-application/base/aggregation-deployment.yaml rename to execution/uc-application/aggregation-deployment.yaml diff --git a/execution/uc-application/base/aggregation-service.yaml b/execution/uc-application/aggregation-service.yaml similarity index 100% rename from execution/uc-application/base/aggregation-service.yaml rename to execution/uc-application/aggregation-service.yaml diff --git a/execution/uc-application/base/jmx-configmap.yaml b/execution/uc-application/jmx-configmap.yaml similarity index 100% rename from execution/uc-application/base/jmx-configmap.yaml rename to execution/uc-application/jmx-configmap.yaml diff --git a/execution/uc-application/base/kustomization.yaml b/execution/uc-application/kustomization.yaml similarity index 100% rename from execution/uc-application/base/kustomization.yaml rename to execution/uc-application/kustomization.yaml diff --git a/execution/uc-application/overlay/uc1-application/kustomization.yaml b/execution/uc-application/overlay/uc1-application/kustomization.yaml deleted file mode 100644 index 0d3820fe392e1d2224d78a8dd2415c4dce37c6e6..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc1-application/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc1- - -images: - - name: uc-app - newName: theodolite/theodolite-uc1-kstreams-app - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff --git a/execution/uc-application/overlay/uc1-application/set_paramters.yaml b/execution/uc-application/overlay/uc1-application/set_paramters.yaml deleted file mode 100644 index cb85048128774ab421b89338d5b1ce23791acac8..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc1-application/set_paramters.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: 1 - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "100" - resources: - limits: - memory: 4Gi - cpu: 1000m diff --git a/execution/uc-application/overlay/uc2-application/kustomization.yaml b/execution/uc-application/overlay/uc2-application/kustomization.yaml deleted file mode 100644 index cd32cabf70fdfa666a5703c97bc4e4fad7800ba7..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc2-application/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc2- - -images: - - name: uc-app - newName: theodolite/theodolite-uc2-kstreams-app - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff --git a/execution/uc-application/overlay/uc2-application/set_paramters.yaml b/execution/uc-application/overlay/uc2-application/set_paramters.yaml deleted file mode 100644 index cb85048128774ab421b89338d5b1ce23791acac8..0000000000000000000000000000000000000000 --- 
a/execution/uc-application/overlay/uc2-application/set_paramters.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: 1 - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "100" - resources: - limits: - memory: 4Gi - cpu: 1000m diff --git a/execution/uc-application/overlay/uc3-application/kustomization.yaml b/execution/uc-application/overlay/uc3-application/kustomization.yaml deleted file mode 100644 index 5722cbca8cc79247063921a55252435804edefe6..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc3-application/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc3- - -images: - - name: uc-app - newName: theodolite/theodolite-uc3-kstreams-app - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff --git a/execution/uc-application/overlay/uc3-application/set_paramters.yaml b/execution/uc-application/overlay/uc3-application/set_paramters.yaml deleted file mode 100644 index cb85048128774ab421b89338d5b1ce23791acac8..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc3-application/set_paramters.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: 1 - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "100" - resources: - limits: - memory: 4Gi - cpu: 1000m diff --git a/execution/uc-application/overlay/uc4-application/kustomization.yaml b/execution/uc-application/overlay/uc4-application/kustomization.yaml deleted file mode 100644 index b44a9bb643802735b740b74bdb47299fb413e5d3..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc4-application/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc4- - -images: - - name: uc-app - newName: theodolite/theodolite-uc4-kstreams-app - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff --git a/execution/uc-application/overlay/uc4-application/set_paramters.yaml b/execution/uc-application/overlay/uc4-application/set_paramters.yaml deleted file mode 100644 index cb85048128774ab421b89338d5b1ce23791acac8..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc4-application/set_paramters.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: 1 - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "100" - resources: - limits: - memory: 4Gi - cpu: 1000m diff --git a/execution/uc-application/base/service-monitor.yaml b/execution/uc-application/service-monitor.yaml similarity index 100% rename from execution/uc-application/base/service-monitor.yaml rename to execution/uc-application/service-monitor.yaml diff --git a/execution/uc-workload-generator/base/kustomization.yaml b/execution/uc-workload-generator/kustomization.yaml similarity index 100% rename from execution/uc-workload-generator/base/kustomization.yaml rename to execution/uc-workload-generator/kustomization.yaml diff --git 
a/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml deleted file mode 100644 index 553b769a3bacd3356d6b5af5ba2e865acdd47a7c..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc1- - -images: - - name: workload-generator - newName: theodolite/theodolite-uc1-workload-generator - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff --git a/execution/uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml deleted file mode 100644 index b275607c27723b1e7e5e7e2b5c02942731bed809..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: 1 - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "25000" - - name: INSTANCES - value: "1" diff --git a/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml deleted file mode 100644 index ff68743355d55459f2df988e8dd42bf0b3b6ae64..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc2- - -images: - - name: workload-generator - newName: theodolite/theodolite-uc2-workload-generator - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff --git a/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml deleted file mode 100644 index 187cb4717195537288e58035dcdda5f34fc9ceed..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: 1 - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "4" - - name: HIERARCHY - value: "full" - - name: NUM_NESTED_GROUPS - value: "5" - - name: INSTANCES - value: "1" diff --git a/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml deleted file mode 100644 index a7022480fcfe401f3e4e4c3898c3d79930198d3e..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc3- - -images: - - name: workload-generator - newName: theodolite/theodolite-uc3-workload-generator - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff 
--git a/execution/uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml deleted file mode 100644 index b275607c27723b1e7e5e7e2b5c02942731bed809..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: 1 - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "25000" - - name: INSTANCES - value: "1" diff --git a/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml deleted file mode 100644 index 5efb0eb25a26371cdddfcc7969a2d10131dbb448..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc4- - -images: - - name: workload-generator - newName: theodolite/theodolite-uc4-workload-generator - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff --git a/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml deleted file mode 100644 index b275607c27723b1e7e5e7e2b5c02942731bed809..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: 1 - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "25000" - - name: INSTANCES - value: "1" diff --git a/execution/uc-workload-generator/base/workloadGenerator.yaml b/execution/uc-workload-generator/workloadGenerator.yaml similarity index 100% rename from execution/uc-workload-generator/base/workloadGenerator.yaml rename to execution/uc-workload-generator/workloadGenerator.yaml