diff --git a/.gitignore b/.gitignore
index 2c5938ae5be0ad3c1b7b18192635f386def00b64..bef98bd0b29a225ac758c501ea69e6eaf4ba1773 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,3 +27,5 @@ tmp/
 .idea/
 *.iml
 *.iws
+
+.venv
diff --git a/execution/.gitignore b/execution/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..aab7f5dde3e52f51a5a788f9a83f44b2afeb123b
--- /dev/null
+++ b/execution/.gitignore
@@ -0,0 +1,2 @@
+exp*
+exp_counter.txt
\ No newline at end of file
diff --git a/execution/lag_analysis.py b/execution/lag_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2690ab13b072171020cd5b27a55d6260b9b9084
--- /dev/null
+++ b/execution/lag_analysis.py
@@ -0,0 +1,124 @@
+import sys
+import os
+import csv
+from datetime import datetime, timedelta, timezone
+import requests
+import pandas as pd
+import matplotlib.pyplot as plt
+from sklearn.linear_model import LinearRegression
+
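+# Usage: python lag_analysis.py <exp_id> <benchmark> <dim_value> <instances>
+# (invoked by the run_uc*-new.sh scripts after each subexperiment)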
+exp_id = sys.argv[1]
+benchmark = sys.argv[2]
+dim_value = sys.argv[3]
+instances = sys.argv[4]
+execution_minutes = 5  # must match EXECUTION_MINUTES in the run_uc*-new.sh scripts
+time_diff_ms = int(os.getenv('CLOCK_DIFF_MS', 0))
+
+#http://localhost:9090/api/v1/query_range?query=sum%20by(job,topic)(kafka_consumer_consumer_fetch_manager_metrics_records_lag)&start=2015-07-01T20:10:30.781Z&end=2020-07-01T20:11:00.781Z&step=15s
+
+now_local = datetime.utcnow().replace(tzinfo=timezone.utc).replace(microsecond=0)
+now = now_local - timedelta(milliseconds=time_diff_ms)
+print(f"Now Local: {now_local}")
+print(f"Now Used: {now}")
+
+end = now
+start = now - timedelta(minutes=execution_minutes)
+
+#print(start.isoformat().replace('+00:00', 'Z'))
+#print(end.isoformat().replace('+00:00', 'Z'))
+
+# Query the per-topic consumer-group lag; kafka_consumergroup_group_lag is
+# exposed by the Kafka Lag Exporter deployed next to the use-case application
+response = requests.get('http://kube1.se.internal:32529/api/v1/query_range', params={
+    #'query': "sum by(job,topic)(kafka_consumer_consumer_fetch_manager_metrics_records_lag)",
+    'query': "sum by(group, topic)(kafka_consumergroup_group_lag > 0)",
+    'start': start.isoformat(),
+    'end': end.isoformat(),
+    'step': '5s'})
+
+results = response.json()['data']['result']
+
+d = []
+
+for result in results:
+    topic = result['metric']['topic']
+    for value in result['values']:
+        # Treat NaN samples from Prometheus as zero lag
+        d.append({'topic': topic, 'timestamp': int(value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
+
+df = pd.DataFrame(d)
+
+input_df = df.loc[df['topic'] == "input"]  # renamed from 'input' to avoid shadowing the built-in
+
+#input_df.plot(kind='line',x='timestamp',y='value',color='red')
+#plt.show()
+
+X = input_df.iloc[:, 1].values.reshape(-1, 1)  # timestamps as a single-column numpy array
+Y = input_df.iloc[:, 2].values.reshape(-1, 1)  # lag values; -1 lets numpy infer the row count
+linear_regressor = LinearRegression()
+linear_regressor.fit(X, Y)
+Y_pred = linear_regressor.predict(X)
+
+print(linear_regressor.coef_)
+
+#print(Y_pred)
+
+fields = [exp_id, datetime.now(), benchmark, dim_value, instances, linear_regressor.coef_]
+print(fields)
+with open(r'results.csv', 'a', newline='') as f:
+    writer = csv.writer(f)
+    writer.writerow(fields)
+
+filename = f"exp{exp_id}_{benchmark}_{dim_value}_{instances}"
+
+plt.plot(X, Y)
+plt.plot(X, Y_pred, color='red')
+
+plt.savefig(f"{filename}_plot.png")
+
+df.to_csv(f"{filename}_values.csv")
+
+
+# Load partition count
+
+response = requests.get('http://kube1.se.internal:32529/api/v1/query_range', params={
+    'query': "count by(group,topic)(kafka_consumergroup_group_offset > 0)",
+    'start': start.isoformat(),
+    'end': end.isoformat(),
+    'step': '5s'})
+
+results = response.json()['data']['result']
+
+d = []
+
+for result in results:
+    topic = result['metric']['topic']
+    for value in result['values']:
+        d.append({'topic': topic, 'timestamp': int(value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
+
+df = pd.DataFrame(d)
+
+df.to_csv(f"{filename}_partitions.csv")
+
+
+# Load instances count
+
+response = requests.get('http://kube1.se.internal:32529/api/v1/query_range', params={
+    'query': "count(count (kafka_consumer_consumer_fetch_manager_metrics_records_lag) by(pod))",
+    'start': start.isoformat(),
+    'end': end.isoformat(),
+    'step': '5s'})
+
+results = response.json()['data']['result']
+
+d = []
+
+for result in results:
+    for value in result['values']:
+        d.append({'timestamp': int(value[0]), 'value': int(value[1])})
+
+df = pd.DataFrame(d)
+
+df.to_csv(f"{filename}_instances.csv")
\ No newline at end of file
diff --git a/execution/run_loop.sh b/execution/run_loop.sh
new file mode 100755
index 0000000000000000000000000000000000000000..04664a7cb4a88072ed3d0bca21297ac5b0f757ef
--- /dev/null
+++ b/execution/run_loop.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
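+# Usage: ./run_loop.sh UC DIM_VALUES REPLICAS PARTITIONS
+# e.g. (hypothetical values): ./run_loop.sh 1 "10000, 50000" "1, 2, 4" 40 -- the lists are comma-separated and must be quoted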
+
+UC=$1
+IFS=', ' read -r -a DIM_VALUES <<< "$2"
+IFS=', ' read -r -a REPLICAS <<< "$3"
+PARTITIONS=$4
+
+# Get and increment counter
+EXP_ID=$(cat exp_counter.txt)
+echo $((EXP_ID+1)) > exp_counter.txt
+
+# Store meta information
+IFS=$', '; echo \
+"UC=$UC
+DIM_VALUES=${DIM_VALUES[*]}
+REPLICAS=${REPLICAS[*]}
+PARTITIONS=$PARTITIONS
+" >> "exp${EXP_ID}_uc${UC}_meta.txt"
+
+SUBEXPERIMENTS=$((${#DIM_VALUES[@]} * ${#REPLICAS[@]}))
+SUBEXPERIMENT_COUNTER=0
+
+echo "Going to execute $SUBEXPERIMENTS subexperiments in total..."
+for DIM_VALUE in "${DIM_VALUES[@]}"
+do
+    for REPLICA in "${REPLICAS[@]}"
+    do
+        SUBEXPERIMENT_COUNTER=$((SUBEXPERIMENT_COUNTER+1))
+        echo "Run subexperiment $SUBEXPERIMENT_COUNTER/$SUBEXPERIMENTS with config: $DIM_VALUE $REPLICA"
+        ./run_uc${UC}-new.sh $EXP_ID $DIM_VALUE $REPLICA $PARTITIONS
+        sleep 10s
+    done
+done
diff --git a/execution/run_uc1-new.sh b/execution/run_uc1-new.sh
new file mode 100755
index 0000000000000000000000000000000000000000..540f752b8bca855caef8fc736c5cff05ca6e3b6a
--- /dev/null
+++ b/execution/run_uc1-new.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+EXP_ID=$1
+DIM_VALUE=$2
+INSTANCES=$3
+PARTITIONS=$4
+EXECUTION_MINUTES=5
+
+# Start up Kafka
+# TODO
+
+# Create topics
+echo "Print topics:"
+kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p'
+kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1"
+
+# Start workload generator
+NUM_SENSORS=$DIM_VALUE
+sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g" uc1-workload-generator/deployment.yaml | kubectl apply -f -
+
+# Start application
+REPLICAS=$INSTANCES
+kubectl apply -f uc1-application/aggregation-deployment.yaml
+kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+
+# Execute for certain time
+sleep ${EXECUTION_MINUTES}m
+
+# Run eval script
+source ../.venv/bin/activate
+python lag_analysis.py $EXP_ID uc1 $DIM_VALUE $INSTANCES
+deactivate
+
+# Stop workload generator and application
+kubectl delete -f uc1-workload-generator/deployment.yaml
+kubectl delete -f uc1-application/aggregation-deployment.yaml
+
+# Delete topics instead of tearing down Kafka
+echo "Finished execution"
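+# Topic deletion is asynchronous and sometimes has to be issued more than once,
+# so poll until none of the benchmark topics are listed anymore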
+while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
+do
+    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
+    echo "Wait for topic deletion"
+    sleep 5s
+    # Sometimes a second deletion seems to be required
+done
+echo "Finished topic deletion"
+echo "Exiting script"
+
+KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
+kubectl delete pod $KAFKA_LAG_EXPORTER_POD
diff --git a/execution/run_uc2-new.sh b/execution/run_uc2-new.sh
new file mode 100755
index 0000000000000000000000000000000000000000..6742a7316c0c4ce2cfb506eac979fcc20c0c2374
--- /dev/null
+++ b/execution/run_uc2-new.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+EXP_ID=$1
+DIM_VALUE=$2
+INSTANCES=$3
+PARTITIONS=$4
+EXECUTION_MINUTES=5
+
+# Maybe start up Kafka
+
+# Create topics
+kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1"
+
+# Start workload generator
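+# For uc2, the scaled dimension is the depth of the nested sensor-group hierarchy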
+NUM_NESTED_GROUPS=$DIM_VALUE
+sed "s/{{NUM_NESTED_GROUPS}}/$NUM_NESTED_GROUPS/g" uc2-workload-generator/deployment.yaml | kubectl apply -f -
+
+# Start application
+REPLICAS=$INSTANCES
+kubectl apply -f uc2-application/aggregation-deployment.yaml
+kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+
+# Execute for certain time
+sleep ${EXECUTION_MINUTES}m
+
+# Run eval script
+source ../.venv/bin/activate
+python lag_analysis.py $EXP_ID uc2 $DIM_VALUE $INSTANCES
+deactivate
+
+# Stop workload generator and application
+kubectl delete -f uc2-workload-generator/deployment.yaml
+kubectl delete -f uc2-application/aggregation-deployment.yaml
+
+# Delete topics instead of tearing down Kafka
+echo "Finished execution"
+while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
+do
+    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
+    echo "Wait for topic deletion"
+    sleep 5s
+    # Sometimes a second deletion seems to be required
+done
+echo "Finished topic deletion"
+echo "Exiting script"
+
+KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
+kubectl delete pod $KAFKA_LAG_EXPORTER_POD
diff --git a/execution/run_uc3-new.sh b/execution/run_uc3-new.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c5c0f9eba070d17a71866eab46768721399a2724
--- /dev/null
+++ b/execution/run_uc3-new.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+EXP_ID=$1
+DIM_VALUE=$2
+INSTANCES=$3
+PARTITIONS=$4
+EXECUTION_MINUTES=5
+
+# Maybe start up Kafka
+
+# Create topics
+kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1"
+
+# Start workload generator
+NUM_SENSORS=$DIM_VALUE
+sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g" uc3-workload-generator/deployment.yaml | kubectl apply -f -
+
+# Start application
+REPLICAS=$INSTANCES
+kubectl apply -f uc3-application/aggregation-deployment.yaml
+kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+
+# Execute for certain time
+sleep ${EXECUTION_MINUTES}m
+
+# Run eval script
+source ../.venv/bin/activate
+python lag_analysis.py $EXP_ID uc3 $DIM_VALUE $INSTANCES
+deactivate
+
+# Stop workload generator and application
+kubectl delete -f uc3-workload-generator/deployment.yaml
+kubectl delete -f uc3-application/aggregation-deployment.yaml
+
+# Delete topics instead of tearing down Kafka
+echo "Finished execution"
+while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
+do
+    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
+    echo "Wait for topic deletion"
+    sleep 5s
+    # Sometimes a second deletion seems to be required
+done
+echo "Finished topic deletion"
+echo "Exiting script"
+
+KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
+kubectl delete pod $KAFKA_LAG_EXPORTER_POD
diff --git a/execution/run_uc4-new.sh b/execution/run_uc4-new.sh
new file mode 100755
index 0000000000000000000000000000000000000000..607aecfcfc8a7799dd641d9bc8ce105eda523a24
--- /dev/null
+++ b/execution/run_uc4-new.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+EXP_ID=$1
+DIM_VALUE=$2
+INSTANCES=$3
+PARTITIONS=$4
+EXECUTION_MINUTES=5
+
+# Maybe start up Kafka
+
+# Create topics
+kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1"
+
+# Start workload generator
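+# uc4 scales the number of sensors; the aggregation duration is fixed to 7 days in aggregation-deployment.yaml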
+NUM_SENSORS=$DIM_VALUE
+sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g" uc4-workload-generator/deployment.yaml | kubectl apply -f -
+
+# Start application
+REPLICAS=$INSTANCES
+#AGGREGATION_DURATION_DAYS=$DIM_VALUE
+kubectl apply -f uc4-application/aggregation-deployment.yaml
+#sed "s/{{AGGREGATION_DURATION_DAYS}}/$AGGREGATION_DURATION_DAYS/g" uc4-application/aggregation-deployment.yaml | kubectl apply -f -
+kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+
+# Execute for certain time
+sleep ${EXECUTION_MINUTES}m
+
+# Run eval script
+source ../.venv/bin/activate
+python lag_analysis.py $EXP_ID uc4 $DIM_VALUE $INSTANCES
+deactivate
+
+# Stop workload generator and application
+kubectl delete -f uc4-workload-generator/deployment.yaml
+kubectl delete -f uc4-application/aggregation-deployment.yaml
+
+# Delete topics instead of tearing down Kafka
+echo "Finished execution"
+while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -r '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
+do
+    kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
+    echo "Wait for topic deletion"
+    sleep 5s
+    # Sometimes a second deletion seems to be required
+done
+echo "Finished topic deletion"
+echo "Exiting script"
+
+#TODO maybe delete schemas
+#https://docs.confluent.io/current/schema-registry/schema-deletion-guidelines.html
+#curl -X DELETE http://localhost:8081/subjects/Kafka-value
+
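+# Restart the lag exporter so that state from this run's consumer groups does not
+# carry over into the next subexperiment (assumed purpose of this restart)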
+KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
+kubectl delete pod $KAFKA_LAG_EXPORTER_POD
diff --git a/execution/uc1-application/aggregation-deployment.yaml b/execution/uc1-application/aggregation-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cfbc3bcdd85cd3cac605d2251370aec99392b2f3
--- /dev/null
+++ b/execution/uc1-application/aggregation-deployment.yaml
@@ -0,0 +1,49 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+      - name: uc1-application
+        image: "benediktwetzel/uc1-app:latest"
+        ports:
+        - containerPort: 5555
+          name: jmx
+        env:
+        - name: KAFKA_BOOTSTRAP_SERVERS
+          value: "my-confluent-cp-kafka:9092"
+        - name: COMMIT_INTERVAL_MS
+          value: "100"
+        - name: JAVA_OPTS
+          value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+      - name: prometheus-jmx-exporter
+        image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+        command:
+        - java
+        - -XX:+UnlockExperimentalVMOptions
+        - -XX:+UseCGroupMemoryLimitForHeap
+        - -XX:MaxRAMFraction=1
+        - -XshowSettings:vm
+        - -jar
+        - jmx_prometheus_httpserver.jar
+        - "5556"
+        - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+        ports:
+        - containerPort: 5556
+        volumeMounts:
+        - name: jmx-config
+          mountPath: /etc/jmx-aggregation
+      volumes:
+      - name: jmx-config
+        configMap:
+          name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/execution/uc1-application/aggregation-service.yaml b/execution/uc1-application/aggregation-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85432d04f225c30469f3232153ef6bd72bd02bdf
--- /dev/null
+++ b/execution/uc1-application/aggregation-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-aggregation
+  labels:
+    app: titan-ccp-aggregation
+spec:
+  #type: NodePort
+  selector:
+    app: titan-ccp-aggregation
+  ports:
+  - name: http
+    port: 80
+    targetPort: 80
+    protocol: TCP
+  - name: metrics
+    port: 5556
diff --git a/execution/uc1-application/jmx-configmap.yaml b/execution/uc1-application/jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..78496a86b1242a89b9e844ead3e700fd0b9a9667
--- /dev/null
+++ b/execution/uc1-application/jmx-configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aggregation-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false
diff --git a/execution/uc1-application/service-monitor.yaml b/execution/uc1-application/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e7e758cacb5086305efa26292ddef2afc958096
--- /dev/null
+++ b/execution/uc1-application/service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: titan-ccp-aggregation
+    appScope: titan-ccp
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  endpoints:
+  - port: metrics
+    interval: 10s
diff --git a/execution/uc1-workload-generator/deployment.yaml b/execution/uc1-workload-generator/deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f82519ebdade4069c11b50ea100c8ddd3ed3cf51
--- /dev/null
+++ b/execution/uc1-workload-generator/deployment.yaml
@@ -0,0 +1,24 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-load-generator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+      - name: workload-generator
+        image: benediktwetzel/uc1-wg:latest
+        env:
+        - name: KAFKA_BOOTSTRAP_SERVERS
+          value: "my-confluent-cp-kafka:9092"
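+        # {{NUM_SENSORS}} is substituted by sed in run_uc1-new.sh before applying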
+        - name: NUM_SENSORS
+          value: "{{NUM_SENSORS}}"
diff --git a/execution/uc1-workload-generator/jmx-configmap.yaml b/execution/uc1-workload-generator/jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ea16037d74978a9273936c26eb06420983dd3139
--- /dev/null
+++ b/execution/uc1-workload-generator/jmx-configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: load-generator-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false
diff --git a/execution/uc2-application/aggregation-deployment.yaml b/execution/uc2-application/aggregation-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..206e2606b165d28fb42b0c6fa7f50b55d6d0d8e5
--- /dev/null
+++ b/execution/uc2-application/aggregation-deployment.yaml
@@ -0,0 +1,49 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+      - name: uc2-application
+        image: "benediktwetzel/uc2-app:latest"
+        ports:
+        - containerPort: 5555
+          name: jmx
+        env:
+        - name: KAFKA_BOOTSTRAP_SERVERS
+          value: "my-confluent-cp-kafka:9092"
+        - name: COMMIT_INTERVAL_MS
+          value: "10"
+        - name: JAVA_OPTS
+          value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+      - name: prometheus-jmx-exporter
+        image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+        command:
+        - java
+        - -XX:+UnlockExperimentalVMOptions
+        - -XX:+UseCGroupMemoryLimitForHeap
+        - -XX:MaxRAMFraction=1
+        - -XshowSettings:vm
+        - -jar
+        - jmx_prometheus_httpserver.jar
+        - "5556"
+        - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+        ports:
+        - containerPort: 5556
+        volumeMounts:
+        - name: jmx-config
+          mountPath: /etc/jmx-aggregation
+      volumes:
+      - name: jmx-config
+        configMap:
+          name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/execution/uc2-application/aggregation-service.yaml b/execution/uc2-application/aggregation-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85432d04f225c30469f3232153ef6bd72bd02bdf
--- /dev/null
+++ b/execution/uc2-application/aggregation-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-aggregation
+  labels:
+    app: titan-ccp-aggregation
+spec:
+  #type: NodePort
+  selector:
+    app: titan-ccp-aggregation
+  ports:
+  - name: http
+    port: 80
+    targetPort: 80
+    protocol: TCP
+  - name: metrics
+    port: 5556
diff --git a/execution/uc2-application/jmx-configmap.yaml b/execution/uc2-application/jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..78496a86b1242a89b9e844ead3e700fd0b9a9667
--- /dev/null
+++ b/execution/uc2-application/jmx-configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aggregation-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false
diff --git a/execution/uc2-application/service-monitor.yaml b/execution/uc2-application/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e7e758cacb5086305efa26292ddef2afc958096
--- /dev/null
+++ b/execution/uc2-application/service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: titan-ccp-aggregation
+    appScope: titan-ccp
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  endpoints:
+  - port: metrics
+    interval: 10s
diff --git a/execution/uc2-workload-generator/deployment.yaml b/execution/uc2-workload-generator/deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..52592626f2a6bf93415c29f5bb4f020b527a5899
--- /dev/null
+++ b/execution/uc2-workload-generator/deployment.yaml
@@ -0,0 +1,27 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-load-generator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+      - name: workload-generator
+        image: benediktwetzel/uc2-wg:latest
+        env:
+        - name: KAFKA_BOOTSTRAP_SERVERS
+          value: "my-confluent-cp-kafka:9092"
+        - name: HIERARCHY
+          value: "full"
+        - name: NUM_SENSORS
+          value: "4"
+        - name: NUM_NESTED_GROUPS
+          value: "{{NUM_NESTED_GROUPS}}"
diff --git a/execution/uc2-workload-generator/jmx-configmap.yaml b/execution/uc2-workload-generator/jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ea16037d74978a9273936c26eb06420983dd3139
--- /dev/null
+++ b/execution/uc2-workload-generator/jmx-configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: load-generator-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false
diff --git a/execution/uc3-application/aggregation-deployment.yaml b/execution/uc3-application/aggregation-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cce675eb515fb52435f202bcac734c44c7c36453
--- /dev/null
+++ b/execution/uc3-application/aggregation-deployment.yaml
@@ -0,0 +1,52 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+      - name: uc3-application
+        image: "soerenhenning/uc3-app:latest"
+        ports:
+        - containerPort: 5555
+          name: jmx
+        env:
+        - name: KAFKA_BOOTSTRAP_SERVERS
+          value: "my-confluent-cp-kafka:9092"
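+        # uc3-specific: duration in minutes of the aggregation window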
+        - name: KAFKA_WINDOW_DURATION_MINUTES
+          value: "1"
+        - name: COMMIT_INTERVAL_MS
+          value: "100"
+        - name: JAVA_OPTS
+          value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
+      - name: prometheus-jmx-exporter
+        image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+        command:
+        - java
+        - -XX:+UnlockExperimentalVMOptions
+        - -XX:+UseCGroupMemoryLimitForHeap
+        - -XX:MaxRAMFraction=1
+        - -XshowSettings:vm
+        - -jar
+        - jmx_prometheus_httpserver.jar
+        - "5556"
+        - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+        ports:
+        - containerPort: 5556
+        volumeMounts:
+        - name: jmx-config
+          mountPath: /etc/jmx-aggregation
+      volumes:
+      - name: jmx-config
+        configMap:
+          name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/execution/uc3-application/aggregation-service.yaml b/execution/uc3-application/aggregation-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85432d04f225c30469f3232153ef6bd72bd02bdf
--- /dev/null
+++ b/execution/uc3-application/aggregation-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-aggregation
+  labels:
+    app: titan-ccp-aggregation
+spec:
+  #type: NodePort
+  selector:
+    app: titan-ccp-aggregation
+  ports:
+  - name: http
+    port: 80
+    targetPort: 80
+    protocol: TCP
+  - name: metrics
+    port: 5556
diff --git a/execution/uc3-application/jmx-configmap.yaml b/execution/uc3-application/jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..78496a86b1242a89b9e844ead3e700fd0b9a9667
--- /dev/null
+++ b/execution/uc3-application/jmx-configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aggregation-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false
diff --git a/execution/uc3-application/service-monitor.yaml b/execution/uc3-application/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e7e758cacb5086305efa26292ddef2afc958096
--- /dev/null
+++ b/execution/uc3-application/service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: titan-ccp-aggregation
+    appScope: titan-ccp
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  endpoints:
+  - port: metrics
+    interval: 10s
diff --git a/execution/uc3-workload-generator/deployment.yaml b/execution/uc3-workload-generator/deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b78653e23d62324d4c88da53c23f75184efaa564
--- /dev/null
+++ b/execution/uc3-workload-generator/deployment.yaml
@@ -0,0 +1,23 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-load-generator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+      - name: workload-generator
+        image: benediktwetzel/uc3-wg:latest
+        env:
+        - name: KAFKA_BOOTSTRAP_SERVERS
+          value: "my-confluent-cp-kafka:9092"
+        - name: NUM_SENSORS
+          value: "{{NUM_SENSORS}}"
diff --git a/execution/uc3-workload-generator/jmx-configmap.yaml b/execution/uc3-workload-generator/jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ea16037d74978a9273936c26eb06420983dd3139
--- /dev/null
+++ b/execution/uc3-workload-generator/jmx-configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: load-generator-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false
diff --git a/execution/uc4-application/aggregation-deployment.yaml b/execution/uc4-application/aggregation-deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a0d606b0e21b13d4efe04cacd68d9bd5b7dafd65
--- /dev/null
+++ b/execution/uc4-application/aggregation-deployment.yaml
@@ -0,0 +1,55 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-aggregation
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+      - name: uc4-application
+        image: "soerenhenning/uc4-app:latest"
+        ports:
+        - containerPort: 5555
+          name: jmx
+        env:
+        - name: KAFKA_BOOTSTRAP_SERVERS
+          value: "my-confluent-cp-kafka:9092"
+        - name: AGGREGATION_DURATION_DAYS
+          value: "7" #AGGREGATION_DURATION_DAYS
+        - name: AGGREGATION_DURATION_ADVANCE
+          value: "1"
+        - name: COMMIT_INTERVAL_MS
+          value: "100"
+        - name: JAVA_OPTS
+          value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
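+      # Sidecar that reads the application's JMX metrics (port 5555) and exposes
+      # them to Prometheus on port 5556, scraped via the ServiceMonitor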
+      - name: prometheus-jmx-exporter
+        image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
+        command:
+        - java
+        - -XX:+UnlockExperimentalVMOptions
+        - -XX:+UseCGroupMemoryLimitForHeap
+        - -XX:MaxRAMFraction=1
+        - -XshowSettings:vm
+        - -jar
+        - jmx_prometheus_httpserver.jar
+        - "5556"
+        - /etc/jmx-aggregation/jmx-kafka-prometheus.yml
+        ports:
+        - containerPort: 5556
+        volumeMounts:
+        - name: jmx-config
+          mountPath: /etc/jmx-aggregation
+      volumes:
+      - name: jmx-config
+        configMap:
+          name: aggregation-jmx-configmap
\ No newline at end of file
diff --git a/execution/uc4-application/aggregation-service.yaml b/execution/uc4-application/aggregation-service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85432d04f225c30469f3232153ef6bd72bd02bdf
--- /dev/null
+++ b/execution/uc4-application/aggregation-service.yaml
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: titan-ccp-aggregation
+  labels:
+    app: titan-ccp-aggregation
+spec:
+  #type: NodePort
+  selector:
+    app: titan-ccp-aggregation
+  ports:
+  - name: http
+    port: 80
+    targetPort: 80
+    protocol: TCP
+  - name: metrics
+    port: 5556
diff --git a/execution/uc4-application/jmx-configmap.yaml b/execution/uc4-application/jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..78496a86b1242a89b9e844ead3e700fd0b9a9667
--- /dev/null
+++ b/execution/uc4-application/jmx-configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: aggregation-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false
diff --git a/execution/uc4-application/service-monitor.yaml b/execution/uc4-application/service-monitor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e7e758cacb5086305efa26292ddef2afc958096
--- /dev/null
+++ b/execution/uc4-application/service-monitor.yaml
@@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    app: titan-ccp-aggregation
+    appScope: titan-ccp
+  name: titan-ccp-aggregation
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-aggregation
+  endpoints:
+  - port: metrics
+    interval: 10s
diff --git a/execution/uc4-workload-generator/deployment.yaml b/execution/uc4-workload-generator/deployment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6400abc345dcfb902364d3225bc6eb174380eb8b
--- /dev/null
+++ b/execution/uc4-workload-generator/deployment.yaml
@@ -0,0 +1,23 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  selector:
+    matchLabels:
+      app: titan-ccp-load-generator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: titan-ccp-load-generator
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+      - name: workload-generator
+        image: soerenhenning/uc4-wg:latest
+        env:
+        - name: KAFKA_BOOTSTRAP_SERVERS
+          value: "my-confluent-cp-kafka:9092"
+        - name: NUM_SENSORS
+          value: "{{NUM_SENSORS}}"
diff --git a/execution/uc4-workload-generator/jmx-configmap.yaml b/execution/uc4-workload-generator/jmx-configmap.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ea16037d74978a9273936c26eb06420983dd3139
--- /dev/null
+++ b/execution/uc4-workload-generator/jmx-configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: load-generator-jmx-configmap
+data:
+  jmx-kafka-prometheus.yml: |+
+    jmxUrl: service:jmx:rmi:///jndi/rmi://localhost:5555/jmxrmi
+    lowercaseOutputName: true
+    lowercaseOutputLabelNames: true
+    ssl: false