diff --git a/execution/run_uc1-new.sh b/execution/run_uc1-new.sh index e8f9e32b78e0fbb638d3f3e7a7936badf292e58c..c827ba7a5b58e86c6fca813f72235e2e2f808a01 100755 --- a/execution/run_uc1-new.sh +++ b/execution/run_uc1-new.sh @@ -3,11 +3,20 @@ EXP_ID=$1 DIM_VALUE=$2 INSTANCES=$3 -PARTITIONS=$4 -#CPU_LIMIT=1000 -#MEMORY_LIMIT=4Gi -#KAFKA_STREAMS_COMMIT_INTERVAL_MS=100 -EXECUTION_MINUTES=5 +PARTITIONS=${4:-40} +CPU_LIMIT=${5:-1000m} +MEMORY_LIMIT=${6:-4Gi} +KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100} +EXECUTION_MINUTES=${8:-5} + +echo "EXP_ID: $EXP_ID" +echo "DIM_VALUE: $DIM_VALUE" +echo "INSTANCES: $INSTANCES" +echo "PARTITIONS: $PARTITIONS" +echo "CPU_LIMIT: $CPU_LIMIT" +echo "MEMORY_LIMIT: $MEMORY_LIMIT" +echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS" +echo "EXECUTION_MINUTES: $EXECUTION_MINUTES" # Create Topics #PARTITIONS=40 @@ -21,11 +30,15 @@ kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-z NUM_SENSORS=$DIM_VALUE WL_MAX_RECORDS=150000 WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS)) -sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml | kubectl apply -f - + +WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml) +echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f - # Start application REPLICAS=$INSTANCES -kubectl apply -f uc1-application/aggregation-deployment.yaml +#kubectl apply -f uc1-application/aggregation-deployment.yaml +APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc1-application/aggregation-deployment.yaml) +echo "$APPLICATION_YAML" | kubectl apply -f - kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS # Execute for certain time @@ -39,8 +52,10 @@ deactivate # Stop wl and app #kubectl delete -f 
uc1-workload-generator/deployment.yaml #sed "s/{{INSTANCES}}/1/g" uc1-workload-generator/deployment.yaml | kubectl delete -f - -sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml | kubectl delete -f - -kubectl delete -f uc1-application/aggregation-deployment.yaml +#sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml | kubectl delete -f - +echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f - +#kubectl delete -f uc1-application/aggregation-deployment.yaml +echo "$APPLICATION_YAML" | kubectl delete -f - # Delete topics instead of Kafka diff --git a/execution/run_uc2-new.sh b/execution/run_uc2-new.sh index ce33fe2974fe2b705a8d8aa3696c459b369d2f6d..2f7eeddbf076d7ec74d128633caa264da9ad9922 100755 --- a/execution/run_uc2-new.sh +++ b/execution/run_uc2-new.sh @@ -3,13 +3,20 @@ EXP_ID=$1 DIM_VALUE=$2 INSTANCES=$3 -PARTITIONS=$4 -#CPU_LIMIT=1000 -#MEMORY_LIMIT=4Gi -#KAFKA_STREAMS_COMMIT_INTERVAL_MS=100 -EXECUTION_MINUTES=5 +PARTITIONS=${4:-40} +CPU_LIMIT=${5:-1000m} +MEMORY_LIMIT=${6:-4Gi} +KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100} +EXECUTION_MINUTES=${8:-5} -# Maybe start up Kafka +echo "EXP_ID: $EXP_ID" +echo "DIM_VALUE: $DIM_VALUE" +echo "INSTANCES: $INSTANCES" +echo "PARTITIONS: $PARTITIONS" +echo "CPU_LIMIT: $CPU_LIMIT" +echo "MEMORY_LIMIT: $MEMORY_LIMIT" +echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS" +echo "EXECUTION_MINUTES: $EXECUTION_MINUTES" # Create Topics #PARTITIONS=40 @@ -23,7 +30,9 @@ sed "s/{{NUM_NESTED_GROUPS}}/$NUM_NESTED_GROUPS/g" uc2-workload-generator/deploy # Start application REPLICAS=$INSTANCES -kubectl apply -f uc2-application/aggregation-deployment.yaml +#kubectl apply -f uc2-application/aggregation-deployment.yaml +APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" 
uc2-application/aggregation-deployment.yaml) +echo "$APPLICATION_YAML" | kubectl apply -f - kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS # Execute for certain time @@ -36,7 +45,8 @@ deactivate # Stop wl and app kubectl delete -f uc2-workload-generator/deployment.yaml -kubectl delete -f uc2-application/aggregation-deployment.yaml +#kubectl delete -f uc2-application/aggregation-deployment.yaml +echo "$APPLICATION_YAML" | kubectl delete -f - # Delete topics instead of Kafka diff --git a/execution/run_uc3-new.sh b/execution/run_uc3-new.sh index 52aa8a3b11666d7eed7a2fa064416cb39622727f..e8c892aef52c8ef70ac738ea11850c2a18b2d867 100755 --- a/execution/run_uc3-new.sh +++ b/execution/run_uc3-new.sh @@ -3,11 +3,20 @@ EXP_ID=$1 DIM_VALUE=$2 INSTANCES=$3 -PARTITIONS=$4 -CPU_LIMIT=1000m -MEMORY_LIMIT=4Gi -KAFKA_STREAMS_COMMIT_INTERVAL_MS=100 -EXECUTION_MINUTES=5 +PARTITIONS=${4:-40} +CPU_LIMIT=${5:-1000m} +MEMORY_LIMIT=${6:-4Gi} +KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100} +EXECUTION_MINUTES=${8:-5} + +echo "EXP_ID: $EXP_ID" +echo "DIM_VALUE: $DIM_VALUE" +echo "INSTANCES: $INSTANCES" +echo "PARTITIONS: $PARTITIONS" +echo "CPU_LIMIT: $CPU_LIMIT" +echo "MEMORY_LIMIT: $MEMORY_LIMIT" +echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS" +echo "EXECUTION_MINUTES: $EXECUTION_MINUTES" # Create Topics #PARTITIONS=40 diff --git a/execution/run_uc4-new.sh b/execution/run_uc4-new.sh index ff77c88ec648c4e3be87b6353f8ef5c23c23751d..24e03e58cccfd2134d8c25803e7e98ad197f9fcb 100755 --- a/execution/run_uc4-new.sh +++ b/execution/run_uc4-new.sh @@ -3,13 +3,20 @@ EXP_ID=$1 DIM_VALUE=$2 INSTANCES=$3 -PARTITIONS=$4 -#CPU_LIMIT=1000 -#MEMORY_LIMIT=4Gi -#KAFKA_STREAMS_COMMIT_INTERVAL_MS=100 -EXECUTION_MINUTES=5 +PARTITIONS=${4:-40} +CPU_LIMIT=${5:-1000m} +MEMORY_LIMIT=${6:-4Gi} +KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100} +EXECUTION_MINUTES=${8:-5} -# Maybe start up Kafka +echo "EXP_ID: $EXP_ID" +echo "DIM_VALUE: $DIM_VALUE" +echo "INSTANCES: $INSTANCES" 
+echo "PARTITIONS: $PARTITIONS" +echo "CPU_LIMIT: $CPU_LIMIT" +echo "MEMORY_LIMIT: $MEMORY_LIMIT" +echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS" +echo "EXECUTION_MINUTES: $EXECUTION_MINUTES" # Create Topics #PARTITIONS=40 @@ -25,8 +32,10 @@ sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g" uc4-workload-generator/deployment.yaml | # Start application REPLICAS=$INSTANCES #AGGREGATION_DURATION_DAYS=$DIM_VALUE -kubectl apply -f uc4-application/aggregation-deployment.yaml +#kubectl apply -f uc4-application/aggregation-deployment.yaml #sed "s/{{AGGREGATION_DURATION_DAYS}}/$AGGREGATION_DURATION_DAYS/g" uc4-application/aggregation-deployment.yaml | kubectl apply -f - +APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc4-application/aggregation-deployment.yaml) +echo "$APPLICATION_YAML" | kubectl apply -f - kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS # Execute for certain time @@ -39,7 +48,8 @@ deactivate # Stop wl and app kubectl delete -f uc4-workload-generator/deployment.yaml -kubectl delete -f uc4-application/aggregation-deployment.yaml +#kubectl delete -f uc4-application/aggregation-deployment.yaml +echo "$APPLICATION_YAML" | kubectl delete -f - # Delete topics instead of Kafka diff --git a/execution/uc1-application/aggregation-deployment.yaml b/execution/uc1-application/aggregation-deployment.yaml index fd033e0f9f9f7535ef2ca752c56f14d47dada43d..d5bccca4a72f6a47a855ed8a7ca47fac4a8a19ca 100644 --- a/execution/uc1-application/aggregation-deployment.yaml +++ b/execution/uc1-application/aggregation-deployment.yaml @@ -23,13 +23,13 @@ spec: - name: KAFKA_BOOTSTRAP_SERVERS value: "my-confluent-cp-kafka:9092" - name: COMMIT_INTERVAL_MS - value: "100" + value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}" - name: JAVA_OPTS value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false 
-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555" resources: limits: - memory: "4Gi" - cpu: "1000m" + memory: "{{MEMORY_LIMIT}}" + cpu: "{{CPU_LIMIT}}" - name: prometheus-jmx-exporter image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143" command: diff --git a/execution/uc2-application/aggregation-deployment.yaml b/execution/uc2-application/aggregation-deployment.yaml index 2ede150d79a5dc84838275702e123156878c06b1..ce52421731ea5fc044c435ad10adb311e7e7e878 100644 --- a/execution/uc2-application/aggregation-deployment.yaml +++ b/execution/uc2-application/aggregation-deployment.yaml @@ -23,13 +23,13 @@ spec: - name: KAFKA_BOOTSTRAP_SERVERS value: "my-confluent-cp-kafka:9092" - name: COMMIT_INTERVAL_MS - value: "100" + value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}" - name: JAVA_OPTS value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555" resources: limits: - memory: "4Gi" - cpu: "1000m" + memory: "{{MEMORY_LIMIT}}" + cpu: "{{CPU_LIMIT}}" - name: prometheus-jmx-exporter image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143" command: diff --git a/execution/uc4-application/aggregation-deployment.yaml b/execution/uc4-application/aggregation-deployment.yaml index 5c5aa7d83a0e5b70fd951375ab53bf3b06236f8c..f7a750c790b6a9eab8453fa91e05176de665104e 100644 --- a/execution/uc4-application/aggregation-deployment.yaml +++ b/execution/uc4-application/aggregation-deployment.yaml @@ -27,13 +27,13 @@ spec: - name: AGGREGATION_DURATION_ADVANCE value: "1" - name: COMMIT_INTERVAL_MS - value: "100" + value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}" - name: JAVA_OPTS value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false 
-Dcom.sun.management.jmxremote.port=5555" resources: limits: - memory: "4Gi" - cpu: "1000m" + memory: "{{MEMORY_LIMIT}}" + cpu: "{{CPU_LIMIT}}" - name: prometheus-jmx-exporter image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143" command: