Commit f72d55bc authored by Björn Vonheiden

Merge branch 'feature/kustomize' into feature/runUcPython

parents 3d627413 b4871a47
2 merge requests: !42 "Integerate theodolite and run uc python scripts", !24 "run UC as python implementation"
Showing 338 additions and 95 deletions

@@ -29,38 +29,59 @@ NUM_SENSORS=$DIM_VALUE
WL_MAX_RECORDS=150000
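# Ceiling division: run enough generator instances that each emulates at most WL_MAX_RECORDS sensors (e.g. 300001 sensors -> 3 instances).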
WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
-WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml)
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
+cat <<EOF >uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: $WL_INSTANCES
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "$NUM_SENSORS"
+        - name: INSTANCES
+          value: "$WL_INSTANCES"
+EOF
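+# "kubectl apply -k" builds the overlay with Kustomize and applies the rendered manifests.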
+kubectl apply -k uc-workload-generator/overlay/uc1-workload-generator
# Start application
REPLICAS=$INSTANCES
-# When not using `sed` anymore, use `kubectl apply -f uc1-application`
-kubectl apply -f uc1-application/aggregation-service.yaml
-kubectl apply -f uc1-application/jmx-configmap.yaml
-kubectl apply -f uc1-application/service-monitor.yaml
-#kubectl apply -f uc1-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc1-application/aggregation-deployment.yaml)
-echo "$APPLICATION_YAML" | kubectl apply -f -
-kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+cat <<EOF >uc-application/overlay/uc1-application/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+        resources:
+          limits:
+            memory: $MEMORY_LIMIT
+            cpu: $CPU_LIMIT
+EOF
+kubectl apply -k uc-application/overlay/uc1-application
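+# The overlay's namePrefix "uc1-" yields the deployment name "uc1-titan-ccp-aggregation".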
+kubectl scale deployment uc1-titan-ccp-aggregation --replicas=$REPLICAS
# Execute for a certain time
-sleep ${EXECUTION_MINUTES}m
+sleep $(($EXECUTION_MINUTES * 60))
# Run eval script
source ../.venv/bin/activate
python lag_analysis.py $EXP_ID uc1 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
deactivate
-# Stop wl and app
-#kubectl delete -f uc1-workload-generator/deployment.yaml
-#sed "s/{{INSTANCES}}/1/g" uc1-workload-generator/deployment.yaml | kubectl delete -f -
-#sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc1-workload-generator/deployment.yaml | kubectl delete -f -
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
-kubectl delete -f uc1-application/aggregation-service.yaml
-kubectl delete -f uc1-application/jmx-configmap.yaml
-kubectl delete -f uc1-application/service-monitor.yaml
-#kubectl delete -f uc1-application/aggregation-deployment.yaml
-echo "$APPLICATION_YAML" | kubectl delete -f -
+# Stop workload generator and app
+kubectl delete -k uc-workload-generator/overlay/uc1-workload-generator
+kubectl delete -k uc-application/overlay/uc1-application
# Delete topics instead of redeploying Kafka

@@ -26,34 +26,61 @@ kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-z
# Start workload generator
NUM_NESTED_GROUPS=$DIM_VALUE
sed "s/{{NUM_NESTED_GROUPS}}/$NUM_NESTED_GROUPS/g" uc2-workload-generator/deployment.yaml | kubectl apply -f -
+cat <<EOF >uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "4"
+        - name: HIERARCHY
+          value: "full"
+        - name: NUM_NESTED_GROUPS
+          value: "$NUM_NESTED_GROUPS"
+EOF
+kubectl apply -k uc-workload-generator/overlay/uc2-workload-generator
# Start application
REPLICAS=$INSTANCES
-# When not using `sed` anymore, use `kubectl apply -f uc2-application`
-kubectl apply -f uc2-application/aggregation-service.yaml
-kubectl apply -f uc2-application/jmx-configmap.yaml
-kubectl apply -f uc2-application/service-monitor.yaml
-#kubectl apply -f uc2-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc2-application/aggregation-deployment.yaml)
-echo "$APPLICATION_YAML" | kubectl apply -f -
-kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+cat <<EOF >uc-application/overlay/uc2-application/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+        resources:
+          limits:
+            memory: $MEMORY_LIMIT
+            cpu: $CPU_LIMIT
+EOF
+kubectl apply -k uc-application/overlay/uc2-application
+kubectl scale deployment uc2-titan-ccp-aggregation --replicas=$REPLICAS
# Execute for a certain time
-sleep ${EXECUTION_MINUTES}m
+sleep $(($EXECUTION_MINUTES * 60))
# Run eval script
source ../.venv/bin/activate
python lag_analysis.py $EXP_ID uc2 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
deactivate
-# Stop wl and app
-kubectl delete -f uc2-workload-generator/deployment.yaml
-kubectl delete -f uc2-application/aggregation-service.yaml
-kubectl delete -f uc2-application/jmx-configmap.yaml
-kubectl delete -f uc2-application/service-monitor.yaml
-#kubectl delete -f uc2-application/aggregation-deployment.yaml
-echo "$APPLICATION_YAML" | kubectl delete -f -
+# Stop workload generator and app
+kubectl delete -k uc-workload-generator/overlay/uc2-workload-generator
+kubectl delete -k uc-application/overlay/uc2-application
# Delete topics instead of redeploying Kafka

@@ -29,40 +29,60 @@ NUM_SENSORS=$DIM_VALUE
WL_MAX_RECORDS=150000
WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
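# Same ceiling division as in uc1: one generator instance per WL_MAX_RECORDS sensors.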
-WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc3-workload-generator/deployment.yaml)
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
+cat <<EOF >uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: $WL_INSTANCES
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "$NUM_SENSORS"
+        - name: INSTANCES
+          value: "$WL_INSTANCES"
+EOF
+kubectl apply -k uc-workload-generator/overlay/uc3-workload-generator
# Start application
REPLICAS=$INSTANCES
-# When not using `sed` anymore, use `kubectl apply -f uc3-application`
-kubectl apply -f uc3-application/aggregation-service.yaml
-kubectl apply -f uc3-application/jmx-configmap.yaml
-kubectl apply -f uc3-application/service-monitor.yaml
-#kubectl apply -f uc3-application/aggregation-deployment.yaml
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc3-application/aggregation-deployment.yaml)
-echo "$APPLICATION_YAML" | kubectl apply -f -
-kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+cat <<EOF >uc-application/overlay/uc3-application/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+        resources:
+          limits:
+            memory: $MEMORY_LIMIT
+            cpu: $CPU_LIMIT
+EOF
+kubectl apply -k uc-application/overlay/uc3-application
+kubectl scale deployment uc3-titan-ccp-aggregation --replicas=$REPLICAS
# Execute for a certain time
-sleep ${EXECUTION_MINUTES}m
+sleep $(($EXECUTION_MINUTES * 60))
# Run eval script
source ../.venv/bin/activate
python lag_analysis.py $EXP_ID uc3 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
deactivate
-# Stop wl and app
-#kubectl delete -f uc3-workload-generator/deployment.yaml
-#sed "s/{{INSTANCES}}/1/g" uc3-workload-generator/deployment.yaml | kubectl delete -f -
-echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
-kubectl delete -f uc3-application/aggregation-service.yaml
-kubectl delete -f uc3-application/jmx-configmap.yaml
-kubectl delete -f uc3-application/service-monitor.yaml
-#kubectl delete -f uc3-application/aggregation-deployment.yaml
-#sed "s/{{CPU_LIMIT}}/1000m/g; s/{{MEMORY_LIMIT}}/4Gi/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/100/g" uc3-application/aggregation-deployment.yaml | kubectl delete -f -
-echo "$APPLICATION_YAML" | kubectl delete -f -
+# Stop workload generator and app
+kubectl delete -k uc-workload-generator/overlay/uc3-workload-generator
+kubectl delete -k uc-application/overlay/uc3-application
# Delete topics instead of redeploying Kafka
#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"

@@ -26,37 +26,57 @@ kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-z
# Start workload generator
NUM_SENSORS=$DIM_VALUE
#NUM_SENSORS=xy
sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g" uc4-workload-generator/deployment.yaml | kubectl apply -f -
+cat <<EOF >uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: titan-ccp-load-generator
+spec:
+  replicas: 1
+  template:
+    spec:
+      containers:
+      - name: workload-generator
+        env:
+        - name: NUM_SENSORS
+          value: "$NUM_SENSORS"
+EOF
+kubectl apply -k uc-workload-generator/overlay/uc4-workload-generator
# Start application
REPLICAS=$INSTANCES
#AGGREGATION_DURATION_DAYS=$DIM_VALUE
-# When not using `sed` anymore, use `kubectl apply -f uc4-application`
-kubectl apply -f uc4-application/aggregation-service.yaml
-kubectl apply -f uc4-application/jmx-configmap.yaml
-kubectl apply -f uc4-application/service-monitor.yaml
-#kubectl apply -f uc4-application/aggregation-deployment.yaml
-#sed "s/{{AGGREGATION_DURATION_DAYS}}/$AGGREGATION_DURATION_DAYS/g" uc4-application/aggregation-deployment.yaml | kubectl apply -f -
-APPLICATION_YAML=$(sed "s/{{CPU_LIMIT}}/$CPU_LIMIT/g; s/{{MEMORY_LIMIT}}/$MEMORY_LIMIT/g; s/{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}/$KAFKA_STREAMS_COMMIT_INTERVAL_MS/g" uc4-application/aggregation-deployment.yaml)
-echo "$APPLICATION_YAML" | kubectl apply -f -
-kubectl scale deployment titan-ccp-aggregation --replicas=$REPLICAS
+cat <<EOF >uc-application/overlay/uc4-application/set_paramters.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: titan-ccp-aggregation
+spec:
+  template:
+    spec:
+      containers:
+      - name: uc-application
+        env:
+        - name: COMMIT_INTERVAL_MS
+          value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS"
+        resources:
+          limits:
+            memory: $MEMORY_LIMIT
+            cpu: $CPU_LIMIT
+EOF
+kubectl apply -k uc-application/overlay/uc4-application
+kubectl scale deployment uc4-titan-ccp-aggregation --replicas=$REPLICAS
# Execute for a certain time
-sleep ${EXECUTION_MINUTES}m
+sleep $(($EXECUTION_MINUTES * 60))
# Run eval script
source ../.venv/bin/activate
python lag_analysis.py $EXP_ID uc4 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
deactivate
-# Stop wl and app
-kubectl delete -f uc4-workload-generator/deployment.yaml
-kubectl delete -f uc4-application/aggregation-service.yaml
-kubectl delete -f uc4-application/jmx-configmap.yaml
-kubectl delete -f uc4-application/service-monitor.yaml
-#kubectl delete -f uc4-application/aggregation-deployment.yaml
-echo "$APPLICATION_YAML" | kubectl delete -f -
+# Stop workload generator and app
+kubectl delete -k uc-workload-generator/overlay/uc4-workload-generator
+kubectl delete -k uc-application/overlay/uc4-application
# Delete topics instead of redeploying Kafka

@@ -14,8 +14,8 @@ spec:
    spec:
      terminationGracePeriodSeconds: 0
      containers:
-      - name: uc1-application
-        image: "soerenhenning/uc1-app:latest"
+      - name: uc-application
+        image: uc-app:latest
        ports:
        - containerPort: 5555
          name: jmx
@@ -23,13 +23,13 @@ spec:
        - name: KAFKA_BOOTSTRAP_SERVERS
          value: "my-confluent-cp-kafka:9092"
        - name: COMMIT_INTERVAL_MS
-          value: "{{KAFKA_STREAMS_COMMIT_INTERVAL_MS}}"
+          value: "100"
        - name: JAVA_OPTS
          value: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=5555"
        resources:
          limits:
-            memory: "{{MEMORY_LIMIT}}"
-            cpu: "{{CPU_LIMIT}}"
+            memory: 4Gi
+            cpu: 1000m
      - name: prometheus-jmx-exporter
        image: "solsson/kafka-prometheus-jmx-exporter@sha256:6f82e2b0464f50da8104acd7363fb9b995001ddff77d248379f8788e78946143"
        command:
@@ -50,4 +50,4 @@ spec:
      volumes:
      - name: jmx-config
        configMap:
-          name: aggregation-jmx-configmap
\ No newline at end of file
+          name: aggregation-jmx-configmap

apiVersion: v1
kind: Service
metadata:
  name: titan-ccp-aggregation
  labels:
    app: titan-ccp-aggregation
spec:
  #type: NodePort
  selector:
    app: titan-ccp-aggregation
  ports:
  - name: http
    port: 80
    targetPort: 80

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
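# commonLabels is added to every resource listed below, including their selectors.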
commonLabels:
  app: titan-ccp-aggregation
# Use all resources to compose them into one file
resources:
- aggregation-deployment.yaml
- aggregation-service.yaml
- service-monitor.yaml
- jmx-configmap.yaml

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namePrefix: uc1-
images:
- name: uc-app
  newName: soerenhenning/uc1-app
  newTag: latest
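# The images transform above replaces the base's placeholder image "uc-app" with the uc1 implementation.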
bases:
- ../../base
patchesStrategicMerge:
- set_paramters.yaml # Patch setting the resource parameters

apiVersion: apps/v1
kind: Deployment
metadata:
  name: titan-ccp-aggregation
spec:
  template:
    spec:
      containers:
      - name: uc-application
        env:
        - name: COMMIT_INTERVAL_MS
          value: "100"
        resources:
          limits:
            memory: 4Gi
            cpu: 1000m

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namePrefix: uc2-
images:
- name: uc-app
  newName: soerenhenning/uc2-app
  newTag: latest
bases:
- ../../base
patchesStrategicMerge:
- set_paramters.yaml # Patch setting the resource parameters

apiVersion: apps/v1
kind: Deployment
metadata:
  name: titan-ccp-aggregation
spec:
  template:
    spec:
      containers:
      - name: uc-application
        env:
        - name: COMMIT_INTERVAL_MS
          value: "100"
        resources:
          limits:
            memory: 4Gi
            cpu: 1000m

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namePrefix: uc3-
images:
- name: uc-app
  newName: soerenhenning/uc3-app
  newTag: latest
bases:
- ../../base
patchesStrategicMerge:
- set_paramters.yaml # Patch setting the resource parameters

apiVersion: apps/v1
kind: Deployment
metadata:
  name: titan-ccp-aggregation
spec:
  template:
    spec:
      containers:
      - name: uc-application
        env:
        - name: COMMIT_INTERVAL_MS
          value: "100"
        resources:
          limits:
            memory: 4Gi
            cpu: 1000m

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namePrefix: uc4-
images:
- name: uc-app
  newName: soerenhenning/uc4-app
  newTag: latest
bases:
- ../../base
patchesStrategicMerge:
- set_paramters.yaml # Patch setting the resource parameters

apiVersion: apps/v1
kind: Deployment
metadata:
  name: titan-ccp-aggregation
spec:
  template:
    spec:
      containers:
      - name: uc-application
        env:
        - name: COMMIT_INTERVAL_MS
          value: "100"
        resources:
          limits:
            memory: 4Gi
            cpu: 1000m

@@ -7,7 +7,7 @@ spec:
    matchLabels:
      app: titan-ccp-load-generator
  serviceName: titan-ccp-load-generator
-  replicas: {{INSTANCES}}
+  replicas: 1
  template:
    metadata:
      labels:
@@ -16,16 +16,15 @@ spec:
      terminationGracePeriodSeconds: 0
      containers:
      - name: workload-generator
-        image: soerenhenning/uc3-wg:latest
+        image: workload-generator:latest
        env:
        - name: KAFKA_BOOTSTRAP_SERVERS
          value: "my-confluent-cp-kafka:9092"
        - name: NUM_SENSORS
-          value: "{{NUM_SENSORS}}"
+          value: "25000"
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: INSTANCES
-          value: "{{INSTANCES}}"
\ No newline at end of file
+          value: "1"

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namePrefix: uc1-
images:
- name: workload-generator
  newName: soerenhenning/uc1-wl
  newTag: latest
bases:
- ../../base
patchesStrategicMerge:
- set_paramters.yaml # Patch setting the resource parameters
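
Once a run script has written an overlay's set_paramters.yaml, the manifests the overlay renders can be previewed without applying them. A minimal check, assuming a kubectl with built-in Kustomize support (v1.14 or newer):

kubectl kustomize uc-workload-generator/overlay/uc1-workload-generator

This prints the base StatefulSet with the "uc1-" name prefix applied, the image rewritten to soerenhenning/uc1-wl:latest, and the strategic-merge patch folded in.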