Compare revisions: she/theodolite

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (111)
Showing with 413 additions and 64 deletions
@@ -25,6 +25,11 @@ build:
tags:
- exec-docker
script: ./gradlew --build-cache assemble
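# Keep the assembled JARs and distributions as job artifacts for one day,
# e.g., so they can be downloaded and picked up by later pipeline stages.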
artifacts:
paths:
- "build/libs/*.jar"
- "*/build/distributions/*.tar"
expire_in: 1 day
test:
stage: test
@@ -76,7 +81,7 @@ spotbugs:
.deploy:
stage: deploy
tags:
- exec-docker
- exec-dind
# see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled
# for image usage and settings for building with TLS and docker in docker
image: docker:19.03.1
@@ -86,14 +91,157 @@ spotbugs:
DOCKER_TLS_CERTDIR: "/certs"
script:
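# Derive an image tag prefix from the branch slug; on master the prefix is
# empty, so images are tagged plain "latest" and "<commit sha>".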
- DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//')
- docker build --pull -t titan-ccp-exp-bigdata19-bridge ./exp-bigdata19-bridge
- docker tag titan-ccp-exp-bigdata19-bridge $DOCKERHUB_ORG/titan-ccp-exp-bigdata19-bridge:${DOCKER_TAG_NAME}latest
- docker tag titan-ccp-exp-bigdata19-bridge $DOCKERHUB_ORG/titan-ccp-exp-bigdata19-bridge:$DOCKER_TAG_NAME$CI_COMMIT_SHA
- docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME
- "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest"
- "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA"
- "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$CI_COMMIT_TAG"
- echo $DOCKERHUB_PW | docker login -u $DOCKERHUB_ID --password-stdin
- docker push $DOCKERHUB_ORG/titan-ccp-exp-bigdata19-bridge
- docker push $DOCKERHUB_ORG/$IMAGE_NAME
- docker logout
only:
variables:
- $DOCKERHUB_ORG
- $DOCKERHUB_ID
- $DOCKERHUB_PW
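# Push images automatically for Git tags and for changes to the project or the
# commons library; otherwise offer a manual, non-blocking job. All rules also
# require the Docker Hub credentials and image variables to be set.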
rules:
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
when: always
- changes:
# - $JAVA_PROJECT_NAME/**/* # hope this can be simplified soon, see #51
- application-kafkastreams-commons/**/*
if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: always
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: manual
allow_failure: true
deploy-uc1-kstreams-app:
extends: .deploy
variables:
IMAGE_NAME: "theodolite-uc1-kstreams-app"
JAVA_PROJECT_NAME: "uc1-application"
rules: # hope this can be simplified soon, see #51
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
when: always
- changes:
- uc1-application/**/*
- application-kafkastreams-commons/**/*
if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: always
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: manual
allow_failure: true
deploy-uc2-kstreams-app:
extends: .deploy
variables:
IMAGE_NAME: "theodolite-uc2-kstreams-app"
JAVA_PROJECT_NAME: "uc2-application"
rules: # hope this can be simplified soon, see #51
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
when: always
- changes:
- uc2-application/**/*
- application-kafkastreams-commons/**/*
if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: always
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: manual
allow_failure: true
deploy-uc3-kstreams-app:
extends: .deploy
variables:
IMAGE_NAME: "theodolite-uc3-kstreams-app"
JAVA_PROJECT_NAME: "uc3-application"
rules: # hope this can be simplified soon, see #51
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
when: always
- changes:
- uc3-application/**/*
- application-kafkastreams-commons/**/*
if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: always
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: manual
allow_failure: true
deploy-uc4-kstreams-app:
extends: .deploy
variables:
IMAGE_NAME: "theodolite-uc4-kstreams-app"
JAVA_PROJECT_NAME: "uc4-application"
rules: # hope this can be simplified soon, see #51
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
when: always
- changes:
- uc4-application/**/*
- application-kafkastreams-commons/**/*
if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: always
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: manual
allow_failure: true
deploy-uc1-workload-generator:
extends: .deploy
variables:
IMAGE_NAME: "theodolite-uc1-workload-generator"
JAVA_PROJECT_NAME: "uc1-workload-generator"
rules: # hope this can be simplified soon, see #51
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
when: always
- changes:
- uc1-workload-generator/**/*
- application-kafkastreams-commons/**/*
if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: always
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: manual
allow_failure: true
deploy-uc2-workload-generator:
extends: .deploy
variables:
IMAGE_NAME: "theodolite-uc2-workload-generator"
JAVA_PROJECT_NAME: "uc2-workload-generator"
rules: # hope this can be simplified soon, see #51
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
when: always
- changes:
- uc2-workload-generator/**/*
- application-kafkastreams-commons/**/*
if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: always
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: manual
allow_failure: true
deploy-uc3-workload-generator:
extends: .deploy
variables:
IMAGE_NAME: "theodolite-uc3-workload-generator"
JAVA_PROJECT_NAME: "uc3-workload-generator"
rules: # hope this can be simplified soon, see #51
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
when: always
- changes:
- uc3-workload-generator/**/*
- application-kafkastreams-commons/**/*
if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: always
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: manual
allow_failure: true
deploy-uc4-workload-generator:
extends: .deploy
variables:
IMAGE_NAME: "theodolite-uc4-workload-generator"
JAVA_PROJECT_NAME: "uc4-workload-generator"
rules: # hope this can be simplified soon, see #51
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG"
when: always
- changes:
- uc4-workload-generator/**/*
- application-kafkastreams-commons/**/*
if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: always
- if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
when: manual
allow_failure: true
@@ -101,7 +101,7 @@ sp_cleanup.qualify_static_member_accesses_with_declaring_class=true
sp_cleanup.qualify_static_method_accesses_with_declaring_class=false
sp_cleanup.remove_private_constructors=true
sp_cleanup.remove_redundant_modifiers=false
sp_cleanup.remove_redundant_semicolons=false
sp_cleanup.remove_redundant_semicolons=true
sp_cleanup.remove_redundant_type_arguments=true
sp_cleanup.remove_trailing_whitespaces=true
sp_cleanup.remove_trailing_whitespaces_all=true
......
package theodolite.uc4.application;
package theodolite.commons.kafkastreams;
/**
* Keys to access configuration parameters.
*/
public final class ConfigurationKeys {
// Common keys
public static final String APPLICATION_NAME = "application.name";
public static final String APPLICATION_VERSION = "application.version";
public static final String NUM_THREADS = "num.threads";
public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
public static final String KAFKA_BOOTSTRAP_SERVERS = "kafka.bootstrap.servers";
public static final String SCHEMA_REGISTRY_URL = "schema.registry.url";
public static final String KAFKA_INPUT_TOPIC = "kafka.input.topic";
// Additional topics
public static final String KAFKA_FEEDBACK_TOPIC = "kafka.feedback.topic";
public static final String KAFKA_OUTPUT_TOPIC = "kafka.output.topic";
public static final String AGGREGATION_DURATION_DAYS = "aggregation.duration.days";
public static final String KAFKA_CONFIGURATION_TOPIC = "kafka.configuration.topic";
public static final String AGGREGATION_ADVANCE_DAYS = "aggregation.advance.days";
// UC2
public static final String EMIT_PERIOD_MS = "emit.period.ms";
public static final String NUM_THREADS = "num.threads";
public static final String GRACE_PERIOD_MS = "grace.period.ms";
public static final String COMMIT_INTERVAL_MS = "commit.interval.ms";
// UC3
public static final String KAFKA_WINDOW_DURATION_MINUTES = "kafka.window.duration.minutes";
public static final String CACHE_MAX_BYTES_BUFFERING = "cache.max.bytes.buffering";
// UC4
public static final String AGGREGATION_DURATION_DAYS = "aggregation.duration.days";
public static final String AGGREGATION_ADVANCE_DAYS = "aggregation.advance.days";
public static final String KAFKA_WINDOW_DURATION_MINUTES = "kafka.window.duration.minutes";
private ConfigurationKeys() {}
......
@@ -13,6 +13,8 @@ import titan.ccp.common.kafka.streams.PropertiesBuilder;
public abstract class KafkaStreamsBuilder {
// Kafkastreams application specific
protected String schemaRegistryUrl; // NOPMD for use in subclass
private String applicationName; // NOPMD
private String applicationVersion; // NOPMD
private String bootstrapServers; // NOPMD
@@ -55,6 +57,17 @@ public abstract class KafkaStreamsBuilder {
return this;
}
/**
* Sets the URL for the schema registry.
*
* @param url The URL of the schema registry.
* @return this builder instance.
*/
public KafkaStreamsBuilder schemaRegistry(final String url) {
this.schemaRegistryUrl = url;
return this;
}
/**
* Sets the Kafka Streams property for the number of threads (num.stream.threads). Can be minus
* one to use the default.
@@ -131,9 +144,10 @@
*/
public KafkaStreams build() {
// Check for required attributes for building properties.
Objects.requireNonNull(this.bootstrapServers, "Bootstrap server has not been set.");
Objects.requireNonNull(this.applicationName, "Application name has not been set.");
Objects.requireNonNull(this.applicationVersion, "Application version has not been set.");
Objects.requireNonNull(this.bootstrapServers, "Bootstrap server has not been set.");
Objects.requireNonNull(this.schemaRegistryUrl, "Schema registry has not been set.");
// Create the Kafka streams instance.
return new KafkaStreams(this.buildTopology(), this.buildProperties());
......
@@ -12,9 +12,10 @@ buildscript {
// Variables used to distinguish different subprojects
def useCaseProjects = subprojects.findAll {it -> it.name.matches('uc(.)*')}
def useCaseApplications = subprojects.findAll {it -> it.name.matches('uc[0-9]+-application')}
def useCaseGenerators = subprojects.findAll {it -> it.name.matches('uc[0-9]+-workload-generator*')}
def commonProjects = subprojects.findAll {it -> it.name.matches('(.)*commons(.)*')}
// Plugins
allprojects {
apply plugin: 'eclipse'
@@ -51,22 +52,22 @@ allprojects {
maven {
url "https://oss.sonatype.org/content/repositories/snapshots/"
}
maven {
url 'https://packages.confluent.io/maven/'
}
}
}
// Dependencies for all use cases
configure(useCaseProjects) {
// Dependencies for all use case applications
configure(useCaseApplications) {
dependencies {
// These dependencies are exported to consumers, that is to say found on their compile classpath.
api('org.industrial-devops:titan-ccp-common:0.0.3-SNAPSHOT') { changing = true }
api 'net.kieker-monitoring:kieker:1.14-SNAPSHOT'
api 'net.sourceforge.teetime:teetime:3.0'
// These dependencies are used internally, and not exposed to consumers on their own compile classpath.
implementation 'org.apache.kafka:kafka-clients:2.1.0'
implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
implementation 'org.apache.kafka:kafka-streams:2.6.0' // enable TransformerSuppliers
implementation 'com.google.guava:guava:24.1-jre'
implementation 'org.jctools:jctools-core:2.1.1'
implementation 'org.slf4j:slf4j-simple:1.6.1'
implementation 'org.slf4j:slf4j-simple:1.7.25'
implementation project(':application-kafkastreams-commons')
// Use JUnit test framework
@@ -74,15 +75,31 @@ configure(useCaseProjects) {
}
}
// Dependencies for all use case generators
configure(useCaseGenerators) {
dependencies {
// These dependencies are used internally, and not exposed to consumers on their own compile classpath.
implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
implementation 'com.google.guava:guava:24.1-jre'
implementation 'org.jctools:jctools-core:2.1.1'
implementation 'org.slf4j:slf4j-simple:1.7.25'
// These dependencies are used for the workload-generator-commons
implementation project(':workload-generator-commons')
// Use JUnit test framework
testImplementation 'junit:junit:4.12'
}
}
// Dependencies for all commons
configure(commonProjects) {
dependencies {
// These dependencies are exported to consumers, that is to say found on their compile classpath.
api 'org.apache.kafka:kafka-clients:2.4.0'
// These dependencies are used internally, and not exposed to consumers on their own compile classpath.
implementation 'org.slf4j:slf4j-simple:1.6.1'
implementation('org.industrial-devops:titan-ccp-common:0.0.3-SNAPSHOT') { changing = true }
implementation 'org.slf4j:slf4j-simple:1.7.25'
implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
// Use JUnit test framework
testImplementation 'junit:junit:4.12'
......
@@ -16,11 +16,11 @@ services:
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
uc-app:
image: benediktwetzel/uc1-app:latest
image: theodolite/theodolite-uc1-kstreams-app:latest
environment:
KAFKA_BOOTSTRAP_SERVERS: kafka:9092
uc-wg:
image: benediktwetzel/uc1-wg:latest
image: theodolite/theodolite-uc1-workload-generator:latest
environment:
KAFKA_BOOTSTRAP_SERVERS: kafka:9092
NUM_SENSORS: 1
@@ -16,11 +16,11 @@ services:
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
uc-app:
image: benediktwetzel/uc2-app:latest
image: theodolite/theodolite-uc2-kstreams-app:latest
environment:
KAFKA_BOOTSTRAP_SERVERS: kafka:9092
uc-wg:
image: benediktwetzel/uc2-wg:latest
image: theodolite/theodolite-uc2-workload-generator:latest
environment:
KAFKA_BOOTSTRAP_SERVERS: kafka:9092
NUM_SENSORS: 1
\ No newline at end of file
@@ -16,12 +16,12 @@ services:
KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: 30000
KAFKA_CREATE_TOPICS: "input:3:1,output:3:1,configuration:3:1,aggregation-feedback:3:1,dayofweek:3:1,hourofday:3:1,hourofweek:3:1"
uc-app:
image: benediktwetzel/uc3-app:latest
image: theodolite/theodolite-uc3-kstreams-app:latest
environment:
KAFKA_BOOTSTRAP_SERVERS: kafka:9092
KAFKA_WINDOW_DURATION_MINUTES: 60
uc-wg:
image: benediktwetzel/uc3-wg:latest
image: theodolite/theodolite-uc3-workload-generator:latest
environment:
KAFKA_BOOTSTRAP_SERVERS: kafka:9092
NUM_SENSORS: 1
\ No newline at end of file
@@ -26,11 +26,11 @@ services:
SCHEMA_REGISTRY_HOST_NAME: schema-registry
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181'
uc-app:
image: soerenhenning/uc4-app:latest #TODO
image: theodolite/theodolite-uc4-kstreams-app:latest
environment:
KAFKA_BOOTSTRAP_SERVERS: kafka:9092
SCHEMA_REGISTRY_URL: http://schema-registry:8081
uc-wg:
image: soerenhenning/uc4-wg:latest #TODO
image: theodolite/theodolite-uc4-workload-generator:latest
environment:
KAFKA_BOOTSTRAP_SERVERS: kafka:9092
# Release Process
We assume that we are creating the release `v0.1.1`. Please make sure to
adjust the following steps according to the release you are actually
performing. A shell sketch of these steps is shown after the list.
1. Create a new branch `v0.1` if it does not already exist. This branch will never
again be merged into master.
2. Check out the `v0.1` branch.
3. Update all references to Theodolite Docker images to tag `v0-1-1`. These are
mainly the Kubernetes resource definitions in `execution` as well as the Docker
Compose files in `docker-test`.
4. Commit these changes.
5. Tag this commit with `v0.1.1`. The corresponding Docker images will be uploaded.
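A minimal sketch of these steps, assuming GNU sed, a clean working tree, and
that all image references currently use the `:latest` tag (the exact set of
files to touch depends on the release):

```sh
# Create the release branch (or check it out if it already exists).
git checkout -b v0.1 || git checkout v0.1

# Point all Theodolite image references to the release tag v0-1-1
# (Kubernetes resources in execution/, Docker Compose files in docker-test/).
grep -rl 'theodolite/theodolite-' execution/ docker-test/ \
  | xargs sed -i 's#\(theodolite/theodolite-[a-z0-9-]*\):latest#\1:v0-1-1#g'

# Commit and tag; pushing the tag triggers the Docker image upload.
git add -A
git commit -m "Update image tags for release v0.1.1"
git tag v0.1.1
git push origin v0.1 v0.1.1
```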
@@ -102,7 +102,15 @@ Other Kafka deployments, for example, using Strimzi, should work in a similar way.
A permanently running pod used for Kafka configuration is started via:
```sh
kubectl apply -f infrastructure/kafka/kafka-client.yaml
```
#### A ZooKeeper Client Pod

Likewise, a permanently running pod for ZooKeeper access is started via:
```sh
kubectl apply -f infrastructure/zookeeper-client.yaml
```
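Once running, this pod can be used to issue arbitrary ZooKeeper commands, as
the benchmark scripts do during teardown. For example, to list the root znodes
(assuming the ZooKeeper service is reachable as `my-confluent-cp-zookeeper`):

```sh
kubectl exec zookeeper-client -- bash -c \
  "zookeeper-shell my-confluent-cp-zookeeper:2181 ls /"
```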
#### The Kafka Lag Exporter
@@ -142,15 +150,15 @@ Depending on your setup, some additional adjustments may be necessary:
## Execution
The `./run_loop.sh` script is the entrypoint for all benchmark executions. It has to be called as follows:
The `./theodolite.sh` script is the entrypoint for all benchmark executions. It has to be called as follows (see the example invocation after the parameter list):
```sh
./run_loop.sh <use-case> <wl-values> <instances> <partitions> <cpu-limit> <memory-limit> <commit-interval> <duration>
./theodolite.sh <use-case> <wl-values> <instances> <partitions> <cpu-limit> <memory-limit> <commit-interval> <duration>
```
* `<use-case>`: Stream processing use case to be benchmarked. Has to be one of `1`, `2`, `3` or `4`.
* `<wl-values>`: Values for the workload generator to be tested, separated by commas. For example `100000, 200000, 300000`.
* `<instances>`: Numbers of instances to be benchmarked, separated by commas. For example `1, 2, 3, 4`.
* `<wl-values>`: Values for the workload generator to be tested, separated by commas and quoted. For example `"100000, 200000, 300000"`.
* `<instances>`: Numbers of instances to be benchmarked, separated by commas and quoted. For example `"1, 2, 3, 4"`.
* `<partitions>`: Number of partitions for Kafka topics. Optional. Default `40`.
* `<cpu-limit>`: Kubernetes CPU limit. Optional. Default `1000m`.
* `<memory-limit>`: Kubernetes memory limit. Optional. Default `4Gi`.
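For example, the following invocation benchmarks use case 1 with three workload
intensities and two instance counts, passing the documented defaults for
partitions, CPU, and memory explicitly; the final two values (commit interval
and execution duration) are illustrative, as their defaults are not shown here:

```sh
./theodolite.sh 1 "100000, 200000, 300000" "1, 2" 40 1000m 4Gi 100 5
```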
......
@@ -47,8 +47,7 @@ sidecar:
# If specified, the sidecar will search for datasource config-maps inside this namespace.
# Otherwise the namespace in which the sidecar is running will be used.
# It's also possible to specify ALL to search in all namespaces
searchNamespace: default
searchNamespace: null
service:
nodePort: 31199
......
apiVersion: v1
kind: Pod
metadata:
name: kafka-client-2
name: kafka-client
spec:
containers:
- name: kafka-client
......
@@ -9,4 +9,4 @@ roleRef:
subjects:
- kind: ServiceAccount
name: prometheus
namespace: titan-scalability
\ No newline at end of file
namespace: default
\ No newline at end of file
apiVersion: v1
kind: Pod
metadata:
name: zookeeper-client
spec:
containers:
- name: zookeeper-client
image: confluentinc/cp-zookeeper:5.4.0
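# Keep the container running idle so the pod can be used with kubectl exec.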
command:
- sh
- -c
- "exec tail -f /dev/null"
@@ -76,7 +76,7 @@ echo "Finished execution, print topics:"
#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
do
kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*'"
kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*' --if-exists"
echo "Wait for topic deletion"
sleep 5s
#echo "Finished waiting, print topics:"
@@ -85,6 +85,33 @@ do
done
echo "Finish topic deletion, print topics:"
#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
# delete zookeeper nodes used for workload generation
echo "Delete ZooKeeper configurations used for workload generation"
kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation"
echo "Waiting for deletion"
while [ true ]
do
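# Read the children of the ZooKeeper root node into an array; the last line of
# the zookeeper-shell output is a bracketed, comma-separated list of znodes.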
IFS=', ' read -r -a array <<< $(kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 ls /" | tail -n 1 | awk -F[\]\[] '{print $2}')
found=0
for element in "${array[@]}"
do
if [ "$element" == "workload-generation" ]; then
found=1
break
fi
done
if [ $found -ne 1 ]; then
echo "ZooKeeper reset was successful."
break
else
echo "ZooKeeper reset was not successful. Retrying in 5s."
sleep 5s
fi
done
echo "Deletion finished"
echo "Exiting script"
KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
......
@@ -22,11 +22,16 @@ echo "EXECUTION_MINUTES: $EXECUTION_MINUTES"
#PARTITIONS=40
#kubectl run temp-kafka --rm --attach --restart=Never --image=solsson/kafka --command -- bash -c "./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1"
PARTITIONS=$PARTITIONS
kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1"
kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic aggregation-feedback --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1"
# Start workload generator
NUM_NESTED_GROUPS=$DIM_VALUE
sed "s/{{NUM_NESTED_GROUPS}}/$NUM_NESTED_GROUPS/g" uc2-workload-generator/deployment.yaml | kubectl apply -f -
WL_MAX_RECORDS=150000
APPROX_NUM_SENSORS=$((4**NUM_NESTED_GROUPS))
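# Ceiling division: enough generator instances so that each one simulates at
# most WL_MAX_RECORDS sensors.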
WL_INSTANCES=$(((APPROX_NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_NESTED_GROUPS}}/$NUM_NESTED_GROUPS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc2-workload-generator/deployment.yaml)
echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
# Start application
REPLICAS=$INSTANCES
@@ -48,7 +53,8 @@ python lag_analysis.py $EXP_ID uc2 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
deactivate
# Stop wl and app
kubectl delete -f uc2-workload-generator/deployment.yaml
#sed "s/{{INSTANCES}}/1/g" uc2-workload-generator/deployment.yaml | kubectl delete -f -
echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
kubectl delete -f uc2-application/aggregation-service.yaml
kubectl delete -f uc2-application/jmx-configmap.yaml
kubectl delete -f uc2-application/service-monitor.yaml
@@ -67,9 +73,9 @@ echo "$APPLICATION_YAML" | kubectl delete -f -
#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'"
echo "Finished execution, print topics:"
#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|aggregation-feedback|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
do
kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*'"
kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|aggregation-feedback|output|configuration|theodolite-.*' --if-exists"
echo "Wait for topic deletion"
sleep 5s
#echo "Finished waiting, print topics:"
@@ -78,6 +84,33 @@ do
done
echo "Finish topic deletion, print topics:"
#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
# delete zookeeper nodes used for workload generation
echo "Delete ZooKeeper configurations used for workload generation"
kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation"
echo "Waiting for deletion"
while [ true ]
do
IFS=', ' read -r -a array <<< $(kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 ls /" | tail -n 1 | awk -F[\]\[] '{print $2}')
found=0
for element in "${array[@]}"
do
if [ "$element" == "workload-generation" ]; then
found=1
break
fi
done
if [ $found -ne 1 ]; then
echo "ZooKeeper reset was successful."
break
else
echo "ZooKeeper reset was not successful. Retrying in 5s."
sleep 5s
fi
done
echo "Deletion finished"
echo "Exiting script"
KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
......
@@ -77,7 +77,7 @@ echo "Finished execution, print topics:"
#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
do
kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*'"
kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*' --if-exists"
echo "Wait for topic deletion"
sleep 5s
#echo "Finished waiting, print topics:"
@@ -86,6 +86,33 @@ do
done
echo "Finish topic deletion, print topics:"
#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
# delete zookeeper nodes used for workload generation
echo "Delete ZooKeeper configurations used for workload generation"
kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation"
echo "Waiting for deletion"
while [ true ]
do
IFS=', ' read -r -a array <<< $(kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 ls /" | tail -n 1 | awk -F[\]\[] '{print $2}')
found=0
for element in "${array[@]}"
do
if [ "$element" == "workload-generation" ]; then
found=1
break
fi
done
if [ $found -ne 1 ]; then
echo "ZooKeeper reset was successful."
break
else
echo "ZooKeeper reset was not successful. Retrying in 5s."
sleep 5s
fi
done
echo "Deletion finished"
echo "Exiting script"
KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
......
@@ -26,8 +26,11 @@ kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-z
# Start workload generator
NUM_SENSORS=$DIM_VALUE
#NUM_SENSORS=xy
sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g" uc4-workload-generator/deployment.yaml | kubectl apply -f -
WL_MAX_RECORDS=150000
WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS))
WORKLOAD_GENERATOR_YAML=$(sed "s/{{NUM_SENSORS}}/$NUM_SENSORS/g; s/{{INSTANCES}}/$WL_INSTANCES/g" uc4-workload-generator/deployment.yaml)
echo "$WORKLOAD_GENERATOR_YAML" | kubectl apply -f -
# Start application
REPLICAS=$INSTANCES
@@ -51,7 +54,8 @@ python lag_analysis.py $EXP_ID uc4 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES
deactivate
# Stop wl and app
kubectl delete -f uc4-workload-generator/deployment.yaml
#sed "s/{{INSTANCES}}/1/g" uc4-workload-generator/deployment.yaml | kubectl delete -f -
echo "$WORKLOAD_GENERATOR_YAML" | kubectl delete -f -
kubectl delete -f uc4-application/aggregation-service.yaml
kubectl delete -f uc4-application/jmx-configmap.yaml
kubectl delete -f uc4-application/service-monitor.yaml
@@ -72,7 +76,7 @@ echo "Finished execution, print topics:"
#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0
do
kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*'"
kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*' --if-exists"
echo "Wait for topic deletion"
sleep 5s
#echo "Finished waiting, print topics:"
@@ -81,6 +85,33 @@ do
done
echo "Finish topic deletion, print topics:"
#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p'
# delete zookeeper nodes used for workload generation
echo "Delete ZooKeeper configurations used for workload generation"
kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation"
echo "Waiting for deletion"
while [ true ]
do
IFS=', ' read -r -a array <<< $(kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 ls /" | tail -n 1 | awk -F[\]\[] '{print $2}')
found=0
for element in "${array[@]}"
do
if [ "$element" == "workload-generation" ]; then
found=1
break
fi
done
if [ $found -ne 1 ]; then
echo "ZooKeeper reset was successful."
break
else
echo "ZooKeeper reset was not successful. Retrying in 5s."
sleep 5s
fi
done
echo "Deletion finished"
echo "Exiting script"
KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}")
......
@@ -35,7 +35,7 @@ do
do
SUBEXPERIMENT_COUNTER=$((SUBEXPERIMENT_COUNTER+1))
echo "Run subexperiment $SUBEXPERIMENT_COUNTER/$SUBEXPERIMENTS with config: $DIM_VALUE $REPLICA"
./run_uc$UC-new.sh $EXP_ID $DIM_VALUE $REPLICA $PARTITIONS $CPU_LIMIT $MEMORY_LIMIT $KAFKA_STREAMS_COMMIT_INTERVAL_MS $EXECUTION_MINUTES
./run_uc$UC.sh $EXP_ID $DIM_VALUE $REPLICA $PARTITIONS $CPU_LIMIT $MEMORY_LIMIT $KAFKA_STREAMS_COMMIT_INTERVAL_MS $EXECUTION_MINUTES
sleep 10s
done
done