Commit 2eeaaa5a authored by Sören Henning

Allow for configuration of test parameters

parent 5920ba17
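The run scripts now accept optional positional parameters 4 through 8, resolved with bash default expansion (${N:-default}), so existing three-argument invocations keep working. A minimal invocation sketch follows; the script file name and the argument values are assumptions for illustration, since the changed file names are not shown here:

# Sketch only: script name and values are hypothetical.
# Args: EXP_ID DIM_VALUE INSTANCES [PARTITIONS] [CPU_LIMIT] [MEMORY_LIMIT]
#       [KAFKA_STREAMS_COMMIT_INTERVAL_MS] [EXECUTION_MINUTES]
./run-uc1.sh exp1 10000 3                        # defaults: 40, 1000m, 4Gi, 100, 5
./run-uc1.sh exp1 10000 3 160 2000m 8Gi 50 10    # all parameters overridden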
@@ -3,11 +3,20 @@
EXP_ID=$1
DIM_VALUE=$2
INSTANCES=$3
PARTITIONS=$4
#CPU_LIMIT=1000
#MEMORY_LIMIT=4Gi
#KAFKA_STREAMS_COMMIT_INTERVAL_MS=100
EXECUTION_MINUTES=5
PARTITIONS=${4:-40}
CPU_LIMIT=${5:-1000m}
MEMORY_LIMIT=${6:-4Gi}
KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
EXECUTION_MINUTES=${8:-5}
echo "EXP_ID: $EXP_ID"
echo "DIM_VALUE: $DIM_VALUE"
echo "INSTANCES: $INSTANCES"
echo "PARTITIONS: $PARTITIONS"
echo "CPU_LIMIT: $CPU_LIMIT"
echo "MEMORY_LIMIT: $MEMORY_LIMIT"
echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS"
echo "EXECUTION_MINUTES: $EXECUTION_MINUTES"
# Create Topics
#PARTITIONS=40
@@ -21,11 +30,15 @@ kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-z
NUM_SENSORS=$DIM_VALUE
WL_MAX_RECORDS=150000
WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml | kubectl apply -f -
WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml)
echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
# Start application
REPLICAS=$INSTANCES
kubectl apply -f uc1-application/aggregation-deployment.yaml
#kubectl apply -f uc3-application/aggregation-deployment.yaml
APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc1-application/aggregation-deployment.yaml)
echo "$APPLICATION_YAML" | kubectl apply -f -
kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
# Execute for certain time
@@ -39,8 +52,10 @@ deactivate
# Stop wl and app
#kubectl delete -f uc1-workload-generator/deployment.yaml
#sed "s/{{INSTANCES}}/1/g" uc1-workload-generator/deployment.yaml | kubectl delete -f -
sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml | kubectl delete -f -
kubectl delete -f uc1-application/aggregation-deployment.yaml
#sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml | kubectl delete -f -
echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
#kubectl delete -f uc1-application/aggregation-deployment.yaml
echo "$APPLICATION_YAML" | kubectl delete -f -
# Delete topics instead of Kafka
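The pattern repeated in these scripts: the Kubernetes manifests carry {{PLACEHOLDER}} tokens, the script renders them once with sed into a shell variable, and that same rendered text is piped to kubectl apply at start-up and to kubectl delete at teardown, so both operations act on an identical manifest. A minimal sketch of the pattern, assuming a hypothetical manifest my-deployment.yaml with a {{CPU_LIMIT}} placeholder:

# Render-once sketch; the file name and placeholder are assumptions.
CPU_LIMIT=${1:-1000m}
APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g" my-deployment.yaml)
echo "$APPLICATION_YAML" | kubectl apply -f -    # create with substituted values
# ... run the experiment ...
echo "$APPLICATION_YAML" | kubectl delete -f -   # delete exactly what was applied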
@@ -3,13 +3,20 @@
EXP_ID=$1
DIM_VALUE=$2
INSTANCES=$3
PARTITIONS=$4
#CPU_LIMIT=1000
#MEMORY_LIMIT=4Gi
#KAFKA_STREAMS_COMMIT_INTERVAL_MS=100
EXECUTION_MINUTES=5
PARTITIONS=${4:-40}
CPU_LIMIT=${5:-1000m}
MEMORY_LIMIT=${6:-4Gi}
KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
EXECUTION_MINUTES=${8:-5}
# Maybe start up Kafka
echo "EXP_ID: $EXP_ID"
echo "DIM_VALUE: $DIM_VALUE"
echo "INSTANCES: $INSTANCES"
echo "PARTITIONS: $PARTITIONS"
echo "CPU_LIMIT: $CPU_LIMIT"
echo "MEMORY_LIMIT: $MEMORY_LIMIT"
echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS"
echo "EXECUTION_MINUTES: $EXECUTION_MINUTES"
# Create Topics
#PARTITIONS=40
@@ -23,7 +30,9 @@ sed "s/{{NUM_NESTED_GROUPS}}/$NUM_NESTED_GROUPS/g" uc2-workload-generator/deploy
# Start application
REPLICAS=$INSTANCES
kubectl apply -f uc2-application/aggregation-deployment.yaml
#kubectl apply -f uc2-application/aggregation-deployment.yaml
APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc2-application/aggregation-deployment.yaml)
echo "$APPLICATION_YAML" | kubectl apply -f -
kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
# Execute for certain time
@@ -36,7 +45,8 @@ deactivate
# Stop wl and app
kubectl delete -f uc2-workload-generator/deployment.yaml
kubectl delete -f uc2-application/aggregation-deployment.yaml
#kubectl delete -f uc2-application/aggregation-deployment.yaml
echo "$APPLICATION_YAML" | kubectl delete -f -
# Delete topics instead of Kafka
@@ -3,11 +3,20 @@
EXP_ID=$1
DIM_VALUE=$2
INSTANCES=$3
PARTITIONS=$4
CPU_LIMIT=1000m
MEMORY_LIMIT=4Gi
KAFKA_STREAMS_COMMIT_INTERVAL_MS=100
EXECUTION_MINUTES=5
PARTITIONS=${4:-40}
CPU_LIMIT=${5:-1000m}
MEMORY_LIMIT=${6:-4Gi}
KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
EXECUTION_MINUTES=${8:-5}
echo "EXP_ID: $EXP_ID"
echo "DIM_VALUE: $DIM_VALUE"
echo "INSTANCES: $INSTANCES"
echo "PARTITIONS: $PARTITIONS"
echo "CPU_LIMIT: $CPU_LIMIT"
echo "MEMORY_LIMIT: $MEMORY_LIMIT"
echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS"
echo "EXECUTION_MINUTES: $EXECUTION_MINUTES"
# Create Topics
#PARTITIONS=40
@@ -3,13 +3,20 @@
EXP_ID=$1
DIM_VALUE=$2
INSTANCES=$3
PARTITIONS=$4
#CPU_LIMIT=1000
#MEMORY_LIMIT=4Gi
#KAFKA_STREAMS_COMMIT_INTERVAL_MS=100
EXECUTION_MINUTES=5
PARTITIONS=${4:-40}
CPU_LIMIT=${5:-1000m}
MEMORY_LIMIT=${6:-4Gi}
KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100}
EXECUTION_MINUTES=${8:-5}
# Maybe start up Kafka
echo "EXP_ID: $EXP_ID"
echo "DIM_VALUE: $DIM_VALUE"
echo "INSTANCES: $INSTANCES"
echo "PARTITIONS: $PARTITIONS"
echo "CPU_LIMIT: $CPU_LIMIT"
echo "MEMORY_LIMIT: $MEMORY_LIMIT"
echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS"
echo "EXECUTION_MINUTES: $EXECUTION_MINUTES"
# Create Topics
#PARTITIONS=40
@@ -25,8 +32,10 @@ sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g" uc4-workload-generator/deployment.yaml |
# Start application
REPLICAS=$INSTANCES
#AGGREGATION_DURATION_DAYS=$DIM_VALUE
kubectl apply -f uc4-application/aggregation-deployment.yaml
#kubectl apply -f uc4-application/aggregation-deployment.yaml
#sed "s/{{AGGREGATION_DURATION_DAYS}}/$AGGREGATION_DURATION_DAYS/g" uc4-application/aggregation-deployment.yaml | kubectl apply -f -
APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc4-application/aggregation-deployment.yaml)
echo "$APPLICATION_YAML" | kubectl apply -f -
kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
# Execute for certain time
@@ -39,7 +48,8 @@ deactivate
# Stop wl and app
kubectl delete -f uc4-workload-generator/deployment.yaml
kubectl delete -f uc4-application/aggregation-deployment.yaml
#kubectl delete -f uc4-application/aggregation-deployment.yaml
echo "$APPLICATION_YAML" | kubectl delete -f -
# Delete topics instead of Kafka
@@ -23,13 +23,13 @@ spec:
- name: KAFKA_BOOTSTRAP_SERVERS
value: "my-confluent-cp-kafka:9092"
- name: COMMIT_INTERVAL_MS
value: "100"
value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
- name: JAVA_OPTS
value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
resources:
limits:
memory: "4Gi"
cpu: "1000m"
memory: "{{MEMORY_LIMIT}}"
cpu: "{{CPU_LIMIT}}"
- name: prometheus-jmx-exporter
image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
command:
@@ -23,13 +23,13 @@ spec:
- name: KAFKA_BOOTSTRAP_SERVERS
value: "my-confluent-cp-kafka:9092"
- name: COMMIT_INTERVAL_MS
value: "100"
value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
- name: JAVA_OPTS
value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
resources:
limits:
memory: "4Gi"
cpu: "1000m"
memory: "{{MEMORY_LIMIT}}"
cpu: "{{CPU_LIMIT}}"
- name: prometheus-jmx-exporter
image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
command:
@@ -27,13 +27,13 @@ spec:
- name: AGGREGATION_DURATION_ADVANCE
value: "1"
- name: COMMIT_INTERVAL_MS
value: "100"
value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
- name: JAVA_OPTS
value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
resources:
limits:
memory: "4Gi"
cpu: "1000m"
memory: "{{MEMORY_LIMIT}}"
cpu: "{{CPU_LIMIT}}"
- name: prometheus-jmx-exporter
image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
command:
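To sanity-check the substitution before applying, the rendered manifest kept in the shell variable can simply be inspected; a sketch, reusing the APPLICATION_YAML variable from the scripts above:

# Show the rendered resource limits and commit interval before applying (sketch).
echo "$APPLICATION_YAML" | grep -A 2 "limits:"
echo "$APPLICATION_YAML" | grep -A 1 "COMMIT_INTERVAL_MS"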