diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0dda0bdb6be4434c91801cb6665364fb7fd63d6a..5743d9f732630259ad5401b53e39db64536d35d2 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,32 +1,256 @@ -workflow: - rules: - - if: $CI_MERGE_REQUEST_ID - when: never - - when: always - stages: - - triggers + - build + - test + - check + - deploy + +.dind: + tags: + - exec-dind + # see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled + # for image usage and settings for building with TLS and docker in docker + image: docker:19.03.1 + services: + - docker:19.03.1-dind + variables: + DOCKER_TLS_CERTDIR: "/certs" + + +# Theodolite Benchmarks + +.benchmarks: + image: openjdk:11-jdk + tags: + - exec-docker + variables: + GRADLE_OPTS: "-Dorg.gradle.daemon=false" + cache: + paths: + - .gradle + before_script: + - cd benchmarks + - export GRADLE_USER_HOME=`pwd`/.gradle + +build-benchmarks: + stage: build + extends: .benchmarks + script: ./gradlew --build-cache assemble + artifacts: + paths: + - "benchmarks/build/libs/*.jar" + - "benchmarks/*/build/distributions/*.tar" + expire_in: 1 day -benchmarks: - stage: triggers - trigger: - include: benchmarks/.gitlab-ci.yml - strategy: depend +test-benchmarks: + stage: test + extends: .benchmarks + needs: + - build-benchmarks + script: ./gradlew test --continue + artifacts: + reports: + junit: + - "benchmarks/**/build/test-results/test/TEST-*.xml" + +checkstyle-benchmarks: + stage: check + extends: .benchmarks + needs: + - build-benchmarks + - test-benchmarks + script: ./gradlew checkstyle --continue + artifacts: + paths: + - "benchmarks/*/build/reports/checkstyle/main.html" + when: on_failure + expire_in: 1 day + +pmd-benchmarks: + stage: check + extends: .benchmarks + needs: + - build-benchmarks + - test-benchmarks + script: ./gradlew pmd --continue + artifacts: + paths: + - "benchmarks/*/build/reports/pmd/*.html" + when: on_failure + expire_in: 1 day + +spotbugs-benchmarks: + stage: check + extends: 
.benchmarks + needs: + - build-benchmarks + - test-benchmarks + script: ./gradlew spotbugs --continue + artifacts: + paths: + - "benchmarks/*/build/reports/spotbugs/*.html" + when: on_failure + expire_in: 1 day + +.deploy-benchmarks: + stage: deploy + extends: + - .benchmarks + - .dind + needs: + - build-benchmarks + - checkstyle-benchmarks + - pmd-benchmarks + - spotbugs-benchmarks + script: + - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//') + - docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME + - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $CR_HOST/$CR_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest" + - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $CR_HOST/$CR_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA" + - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $CR_HOST/$CR_ORG/$IMAGE_NAME:$CI_COMMIT_TAG" + - echo $CR_PW | docker login $CR_HOST -u $CR_USER --password-stdin + - docker push $CR_HOST/$CR_ORG/$IMAGE_NAME + - docker logout rules: - - if: "$CI_COMMIT_TAG" + - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" + when: always - changes: - benchmarks/* - - when: manual + - benchmarks/$JAVA_PROJECT_NAME/**/* + - benchmarks/application-kafkastreams-commons/**/* + - benchmarks/workload-generator-commons/**/* + if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" + when: always + - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" + when: manual allow_failure: true -execution: - stage: triggers - trigger: - include: execution/.gitlab-ci.yml - strategy: depend +deploy-uc1-kstreams-app: + extends: .deploy-benchmarks + variables: + IMAGE_NAME: "theodolite-uc1-kstreams-app" + JAVA_PROJECT_NAME: "uc1-application" + +deploy-uc2-kstreams-app: + extends: .deploy-benchmarks + variables: + IMAGE_NAME: "theodolite-uc2-kstreams-app" + JAVA_PROJECT_NAME: "uc2-application" + +deploy-uc3-kstreams-app: + extends: 
.deploy-benchmarks + variables: + IMAGE_NAME: "theodolite-uc3-kstreams-app" + JAVA_PROJECT_NAME: "uc3-application" + +deploy-uc4-kstreams-app: + extends: .deploy-benchmarks + variables: + IMAGE_NAME: "theodolite-uc4-kstreams-app" + JAVA_PROJECT_NAME: "uc4-application" + +deploy-uc1-load-generator: + extends: .deploy-benchmarks + variables: + IMAGE_NAME: "theodolite-uc1-workload-generator" + JAVA_PROJECT_NAME: "uc1-workload-generator" + +deploy-uc2-load-generator: + extends: .deploy-benchmarks + variables: + IMAGE_NAME: "theodolite-uc2-workload-generator" + JAVA_PROJECT_NAME: "uc2-workload-generator" + +deploy-uc3-load-generator: + extends: .deploy-benchmarks + variables: + IMAGE_NAME: "theodolite-uc3-workload-generator" + JAVA_PROJECT_NAME: "uc3-workload-generator" + +deploy-uc4-load-generator: + extends: .deploy-benchmarks + variables: + IMAGE_NAME: "theodolite-uc4-workload-generator" + JAVA_PROJECT_NAME: "uc4-workload-generator" + + +# Theodolite Framework + +.theodolite: + image: openjdk:11-jdk + tags: + - exec-docker + variables: + GRADLE_OPTS: "-Dorg.gradle.daemon=false" + cache: + paths: + - .gradle/wrapper + - .gradle/caches + before_script: + - cd theodolite-quarkus + - export GRADLE_USER_HOME=`pwd`/.gradle + +build-theodolite: + stage: build + extends: .theodolite + # script: ./gradlew --build-cache assemble -Dquarkus.package.type=native + script: ./gradlew --build-cache assemble + artifacts: + paths: + - "theodolite-quarkus/build/lib/*" + - "theodolite-quarkus/build/*-runner.jar" + # - "theodolite-quarkus/build/*-runner" # For native image + expire_in: 1 day + +test-theodolite: + stage: test + extends: .theodolite + needs: + - build-theodolite + script: ./gradlew test --stacktrace + +# Disabled for now +.ktlint-theodolite: + stage: check + extends: .theodolite + needs: + - build-theodolite + - test-theodolite + script: ./gradlew ktlintCheck --continue + +# Disabled for now +.detekt-theodolite: + stage: check + extends: .theodolite + needs: + - 
build-theodolite + - test-theodolite + script: ./gradlew detekt --continue + +deploy-theodolite: + stage: deploy + extends: + - .theodolite + - .dind + needs: + - build-theodolite + - test-theodolite + script: + - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//') + #- docker build -f src/main/docker/Dockerfile.native -t theodolite . + - docker build -f src/main/docker/Dockerfile.jvm -t theodolite . + - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite $CR_HOST/$CR_ORG/theodolite:${DOCKER_TAG_NAME}latest" + - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite $CR_HOST/$CR_ORG/theodolite:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA" + - "[ $CI_COMMIT_TAG ] && docker tag theodolite $CR_HOST/$CR_ORG/theodolite:$CI_COMMIT_TAG" + - echo $CR_PW | docker login $CR_HOST -u $CR_USER --password-stdin + - docker push $CR_HOST/$CR_ORG/theodolite + - docker logout rules: - - if: "$CI_COMMIT_TAG" + - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $CI_COMMIT_TAG" + when: always - changes: - - execution/* - - when: manual + - theodolite-quarkus/**/* + if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW" + when: always + - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW" + when: manual allow_failure: true diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 0000000000000000000000000000000000000000..ae409536b477586aaabde687b0bfbaef1ae422d3 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,16 @@ +cff-version: "1.1.0" +message: "If you use Theodolite, please cite it using these metadata." 
+authors: + - + family-names: Henning + given-names: "Sören" + orcid: "https://orcid.org/0000-0001-6912-2549" + - + family-names: Hasselbring + given-names: Wilhelm + orcid: "https://orcid.org/0000-0001-6625-4335" +title: Theodolite +version: "0.3.0" +repository-code: "https://github.com/cau-se/theodolite" +license: "Apache-2.0" +doi: "10.1016/j.bdr.2021.100209" diff --git a/README.md b/README.md index b1011530b67dad11da2e59e3decd400186f3ed5c..9dcceb9e65a8a50d96e579a1d14c9861eb22cc82 100644 --- a/README.md +++ b/README.md @@ -17,3 +17,10 @@ Theodolite aims to benchmark scalability of stream processing engines for real u ## Theodolite Analysis Tools Theodolite's benchmarking method creates a *scalability graph* allowing to draw conclusions about the scalability of a stream processing engine or its deployment. A scalability graph shows how resource demand evolves with an increasing workload. Theodolite provides Jupyter notebooks for creating such scalability graphs based on benchmarking results from the execution framework. More information can be found in [Theodolite analysis tool](analysis). + + +## How to Cite + +If you use Theodolite, please cite + +> Sören Henning and Wilhelm Hasselbring. (2021). Theodolite: Scalability Benchmarking of Distributed Stream Processing Engines in Microservice Architectures. Big Data Research, Volume 25. DOI: [10.1016/j.bdr.2021.100209](https://doi.org/10.1016/j.bdr.2021.100209). arXiv:[2009.00304](https://arxiv.org/abs/2009.00304). 
diff --git a/analysis/demand-metric-plot.ipynb b/analysis/demand-metric-plot.ipynb index 985d1fc91caec847f1795234903d1cbb34e3ddba..90ef227dbf6a4566760329b615d5f59b4cc2bc25 100644 --- a/analysis/demand-metric-plot.ipynb +++ b/analysis/demand-metric-plot.ipynb @@ -100,6 +100,8 @@ "outputs": [], "source": [ "plt.style.use('ggplot')\n", + "plt.rcParams['pdf.fonttype'] = 42 # TrueType fonts\n", + "plt.rcParams['ps.fonttype'] = 42 # TrueType fonts\n", "plt.rcParams['axes.facecolor']='w'\n", "plt.rcParams['axes.edgecolor']='555555'\n", "#plt.rcParams['ytick.color']='black'\n", diff --git a/benchmarks/.gitlab-ci.yml b/benchmarks/.gitlab-ci.yml deleted file mode 100644 index 1a4d354701459d4730dab398e0210ab9189d7ad3..0000000000000000000000000000000000000000 --- a/benchmarks/.gitlab-ci.yml +++ /dev/null @@ -1,414 +0,0 @@ -image: openjdk:11-jdk - -# Disable the Gradle daemon for Continuous Integration servers as correctness -# is usually a priority over speed in CI environments. Using a fresh -# runtime for each build is more reliable since the runtime is completely -# isolated from any previous builds. 
-variables: - GRADLE_OPTS: "-Dorg.gradle.daemon=false" - -cache: - paths: - - .gradle - -before_script: - - cd benchmarks - - export GRADLE_USER_HOME=`pwd`/.gradle - -stages: - - build - - test - - check - - deploy - -build: - stage: build - tags: - - exec-docker - script: ./gradlew --build-cache assemble - artifacts: - paths: - - "benchmarks/build/libs/*.jar" - - "benchmarks/*/build/distributions/*.tar" - expire_in: 1 day - -test: - stage: test - tags: - - exec-docker - script: ./gradlew test --continue - artifacts: - reports: - junit: - - "benchmarks/**/build/test-results/test/TEST-*.xml" - -checkstyle: - stage: check - tags: - - exec-docker - script: ./gradlew checkstyle --continue - artifacts: - paths: - - "benchmarks/*/build/reports/checkstyle/main.html" - when: on_failure - expire_in: 1 day - -pmd: - stage: check - tags: - - exec-docker - script: ./gradlew pmd --continue - artifacts: - paths: - - "benchmarks/*/build/reports/pmd/*.html" - when: on_failure - expire_in: 1 day - -spotbugs: - stage: check - tags: - - exec-docker - script: ./gradlew spotbugs --continue - artifacts: - paths: - - "benchmarks/*/build/reports/spotbugs/*.html" - when: on_failure - expire_in: 1 day - - -.deploy: - stage: deploy - tags: - - exec-dind - # see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled - # for image usage and settings for building with TLS and docker in docker - image: docker:19.03.1 - services: - - docker:19.03.1-dind - variables: - DOCKER_TLS_CERTDIR: "/certs" - script: - - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//') - - docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME - - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest" - - "[ ! 
$CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA" - - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME $DOCKERHUB_ORG/$IMAGE_NAME:$CI_COMMIT_TAG" - - echo $DOCKERHUB_PW | docker login -u $DOCKERHUB_ID --password-stdin - - docker push $DOCKERHUB_ORG/$IMAGE_NAME - - docker logout - rules: - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - # - $JAVA_PROJECT_NAME/**/* # hope this can be simplified soon, see #51 - - benchmarks/application-kafkastreams-commons/**/* - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-uc1-kstreams-app: - extends: .deploy - variables: - IMAGE_NAME: "theodolite-uc1-kstreams-app" - JAVA_PROJECT_NAME: "uc1-application" - rules: # hope this can be simplified soon, see #51 - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc1-application/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-uc2-kstreams-app: - extends: .deploy - variables: - IMAGE_NAME: "theodolite-uc2-kstreams-app" - JAVA_PROJECT_NAME: "uc2-application" - rules: # hope this can be simplified soon, see #51 - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc2-application/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && 
$DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-uc3-kstreams-app: - extends: .deploy - variables: - IMAGE_NAME: "theodolite-uc3-kstreams-app" - JAVA_PROJECT_NAME: "uc3-application" - rules: # hope this can be simplified soon, see #51 - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc3-application/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-uc4-kstreams-app: - extends: .deploy - variables: - IMAGE_NAME: "theodolite-uc4-kstreams-app" - JAVA_PROJECT_NAME: "uc4-application" - rules: # hope this can be simplified soon, see #51 - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc4-application/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-uc1-workload-generator: - extends: .deploy - variables: - IMAGE_NAME: "theodolite-uc1-workload-generator" - JAVA_PROJECT_NAME: "uc1-workload-generator" - rules: # hope this can be simplified soon, see #51 - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc1-workload-generator/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: 
"$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-uc2-workload-generator: - extends: .deploy - variables: - IMAGE_NAME: "theodolite-uc2-workload-generator" - JAVA_PROJECT_NAME: "uc2-workload-generator" - rules: # hope this can be simplified soon, see #51 - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc2-workload-generator/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-uc3-workload-generator: - extends: .deploy - variables: - IMAGE_NAME: "theodolite-uc3-workload-generator" - JAVA_PROJECT_NAME: "uc3-workload-generator" - rules: # hope this can be simplified soon, see #51 - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc3-workload-generator/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-uc4-workload-generator: - extends: .deploy - variables: - IMAGE_NAME: "theodolite-uc4-workload-generator" - JAVA_PROJECT_NAME: "uc4-workload-generator" - rules: # hope this can be simplified soon, see #51 - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - 
benchmarks/uc4-workload-generator/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -.deploy-ghcr: - stage: deploy - tags: - - exec-dind - # see https://docs.gitlab.com/ee/ci/docker/using_docker_build.html#tls-enabled - # for image usage and settings for building with TLS and docker in docker - image: docker:19.03.1 - services: - - docker:19.03.1-dind - variables: - DOCKER_TLS_CERTDIR: "/certs" - script: - - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//') - - docker build --pull -t $IMAGE_NAME ./$JAVA_PROJECT_NAME - - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME:${DOCKER_TAG_NAME}latest" - - "[ ! $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA" - - "[ $CI_COMMIT_TAG ] && docker tag $IMAGE_NAME ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME:$CI_COMMIT_TAG" - - echo $GITHUB_CR_TOKEN | docker login ghcr.io -u $GITHUB_CR_USER --password-stdin - - docker push ghcr.io/$GITHUB_CR_ORG/$IMAGE_NAME - - docker logout - rules: - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - # - $JAVA_PROJECT_NAME/**/* # hope this can be simplified soon, see #51 - - benchmarks/application-kafkastreams-commons/**/* - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-ghcr-uc1-kstreams-app: - extends: .deploy-ghcr - variables: - IMAGE_NAME: "theodolite-uc1-kstreams-app" - JAVA_PROJECT_NAME: "uc1-application" - rules: # hope this can be 
simplified soon, see #51 - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc1-application/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-ghcr-uc2-kstreams-app: - extends: .deploy-ghcr - variables: - IMAGE_NAME: "theodolite-uc2-kstreams-app" - JAVA_PROJECT_NAME: "uc2-application" - rules: # hope this can be simplified soon, see #51 - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc2-application/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-ghcr-uc3-kstreams-app: - extends: .deploy-ghcr - variables: - IMAGE_NAME: "theodolite-uc3-kstreams-app" - JAVA_PROJECT_NAME: "uc3-application" - rules: # hope this can be simplified soon, see #51 - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc3-application/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-ghcr-uc4-kstreams-app: - extends: .deploy-ghcr - variables: - IMAGE_NAME: 
"theodolite-uc4-kstreams-app" - JAVA_PROJECT_NAME: "uc4-application" - rules: # hope this can be simplified soon, see #51 - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc4-application/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-ghcr-uc1-workload-generator: - extends: .deploy-ghcr - variables: - IMAGE_NAME: "theodolite-uc1-workload-generator" - JAVA_PROJECT_NAME: "uc1-workload-generator" - rules: # hope this can be simplified soon, see #51 - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc1-workload-generator/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-ghcr-uc2-workload-generator: - extends: .deploy-ghcr - variables: - IMAGE_NAME: "theodolite-uc2-workload-generator" - JAVA_PROJECT_NAME: "uc2-workload-generator" - rules: # hope this can be simplified soon, see #51 - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc2-workload-generator/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && 
$JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-ghcr-uc3-workload-generator: - extends: .deploy-ghcr - variables: - IMAGE_NAME: "theodolite-uc3-workload-generator" - JAVA_PROJECT_NAME: "uc3-workload-generator" - rules: # hope this can be simplified soon, see #51 - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc3-workload-generator/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - -deploy-ghcr-uc4-workload-generator: - extends: .deploy-ghcr - variables: - IMAGE_NAME: "theodolite-uc4-workload-generator" - JAVA_PROJECT_NAME: "uc4-workload-generator" - rules: # hope this can be simplified soon, see #51 - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME && $CI_COMMIT_TAG" - when: always - - changes: - - benchmarks/uc4-workload-generator/**/* - - benchmarks/application-kafkastreams-commons/**/* - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: always - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $IMAGE_NAME && $JAVA_PROJECT_NAME" - when: manual - allow_failure: true - diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 3cb86b68e9d37c53572c6611fad1057b5505e9cc..ea8fb80bb2c2bac6121dbaaf72f742aa0e9c62bb 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -67,7 +67,6 @@ configure(useCaseApplications) { implementation 'org.apache.kafka:kafka-streams:2.6.0' // enable TransformerSuppliers implementation 'com.google.code.gson:gson:2.8.2' implementation 'com.google.guava:guava:24.1-jre' - implementation 
'org.jctools:jctools-core:2.1.1' implementation 'org.slf4j:slf4j-simple:1.7.25' implementation project(':application-kafkastreams-commons') @@ -82,8 +81,6 @@ configure(useCaseGenerators) { // These dependencies are used internally, and not exposed to consumers on their own compile classpath. implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true } implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true } - implementation 'com.google.guava:guava:24.1-jre' - implementation 'org.jctools:jctools-core:2.1.1' implementation 'org.slf4j:slf4j-simple:1.7.25' // These dependencies are used for the workload-generator-commmon diff --git a/benchmarks/uc1-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc1-application/.settings/org.eclipse.jdt.ui.prefs index 4e04e2891754324a6e1bf55348b6a38f592bb301..fa98ca63d77bdee891150bd6713f70197a75cefc 100644 --- a/benchmarks/uc1-application/.settings/org.eclipse.jdt.ui.prefs +++ b/benchmarks/uc1-application/.settings/org.eclipse.jdt.ui.prefs @@ -32,7 +32,7 @@ cleanup.qualify_static_member_accesses_with_declaring_class=true cleanup.qualify_static_method_accesses_with_declaring_class=false cleanup.remove_private_constructors=true cleanup.remove_redundant_modifiers=false -cleanup.remove_redundant_semicolons=false +cleanup.remove_redundant_semicolons=true cleanup.remove_redundant_type_arguments=true cleanup.remove_trailing_whitespaces=true cleanup.remove_trailing_whitespaces_all=true diff --git a/benchmarks/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs index 4e04e2891754324a6e1bf55348b6a38f592bb301..4d01df75552c562406705858b6368ecf59d6e82f 100644 --- a/benchmarks/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs +++ b/benchmarks/uc1-workload-generator/.settings/org.eclipse.jdt.ui.prefs @@ -32,7 +32,7 @@ cleanup.qualify_static_member_accesses_with_declaring_class=true 
cleanup.qualify_static_method_accesses_with_declaring_class=false cleanup.remove_private_constructors=true cleanup.remove_redundant_modifiers=false -cleanup.remove_redundant_semicolons=false +cleanup.remove_redundant_semicolons=true cleanup.remove_redundant_type_arguments=true cleanup.remove_trailing_whitespaces=true cleanup.remove_trailing_whitespaces_all=true @@ -66,6 +66,7 @@ org.eclipse.jdt.ui.ignorelowercasenames=true org.eclipse.jdt.ui.importorder=; org.eclipse.jdt.ui.ondemandthreshold=99 org.eclipse.jdt.ui.staticondemandthreshold=99 +org.eclipse.jdt.ui.text.custom_code_templates= sp_cleanup.add_default_serial_version_id=true sp_cleanup.add_generated_serial_version_id=false sp_cleanup.add_missing_annotations=true diff --git a/benchmarks/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java b/benchmarks/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java index a7b27dfdb25760f0b96c930c9705c2eed0402442..26741eb33b2a8d1c23a40938d1261254ac37b636 100644 --- a/benchmarks/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java +++ b/benchmarks/uc1-workload-generator/src/main/java/theodolite/uc1/workloadgenerator/LoadGenerator.java @@ -1,94 +1,23 @@ package theodolite.uc1.workloadgenerator; import java.io.IOException; -import java.time.Duration; -import java.time.temporal.ChronoUnit; -import java.util.Objects; -import java.util.Properties; -import org.apache.kafka.clients.producer.ProducerConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender; -import theodolite.commons.workloadgeneration.dimensions.KeySpace; -import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGenerator; -import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGeneratorBuilder; -import theodolite.commons.workloadgeneration.misc.ZooKeeper; -import 
titan.ccp.model.records.ActivePowerRecord; /** - * Load Generator for UC1. + * Load Generator for Theodolite use case UC1. */ public final class LoadGenerator { private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class); - private static final long MAX_DURATION_IN_DAYS = 30L; - private LoadGenerator() {} /** - * Entry point. + * Start load generator for use case UC1. */ public static void main(final String[] args) throws InterruptedException, IOException { - // uc1 LOGGER.info("Start workload generator for use case UC1."); - - // get environment variables - final String zooKeeperHost = Objects.requireNonNullElse(System.getenv("ZK_HOST"), "localhost"); - final int zooKeeperPort = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("ZK_PORT"), "2181")); - final int numSensors = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10")); - final int periodMs = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000")); - final double value = - Double.parseDouble(Objects.requireNonNullElse(System.getenv("VALUE"), "10")); - final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), - "4")); - final String kafkaBootstrapServers = - Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), "localhost:9092"); - final String schemaRegistryUrl = - Objects.requireNonNullElse(System.getenv("SCHEMA_REGISTRY_URL"), "http://localhost:8091"); - final String kafkaInputTopic = - Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input"); - final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE"); - final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS"); - final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY"); - final int instances = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("INSTANCES"), "1")); - - // create kafka record sender - final Properties kafkaProperties = new Properties(); - // 
kafkaProperties.put("acks", this.acknowledges); - kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize); - kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs); - kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory); - - final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = - new KafkaRecordSender.Builder<ActivePowerRecord>( - kafkaBootstrapServers, - kafkaInputTopic, - schemaRegistryUrl) - .keyAccessor(r -> r.getIdentifier()) - .timestampAccessor(r -> r.getTimestamp()) - .defaultProperties(kafkaProperties) - .build(); - - // create workload generator - final KafkaWorkloadGenerator<ActivePowerRecord> workloadGenerator = - KafkaWorkloadGeneratorBuilder.<ActivePowerRecord>builder() - .instances(instances) - .keySpace(new KeySpace("s_", numSensors)) - .threads(threads) - .period(Duration.of(periodMs, ChronoUnit.MILLIS)) - .duration(Duration.of(MAX_DURATION_IN_DAYS, ChronoUnit.DAYS)) - .generatorFunction( - sensor -> new ActivePowerRecord(sensor, System.currentTimeMillis(), value)) - .zooKeeper(new ZooKeeper(zooKeeperHost, zooKeeperPort)) - .kafkaRecordSender(kafkaRecordSender) - .build(); - - // start - workloadGenerator.start(); + theodolite.commons.workloadgeneration.LoadGenerator.fromEnvironment().run(); } } diff --git a/benchmarks/uc2-application/Dockerfile b/benchmarks/uc2-application/Dockerfile index 99076645ab5e1c3b1a77d2aec7408dc8846f9f51..5177dcede26016990b73467460fd358823c43c76 100644 --- a/benchmarks/uc2-application/Dockerfile +++ b/benchmarks/uc2-application/Dockerfile @@ -2,5 +2,5 @@ FROM openjdk:11-slim ADD build/distributions/uc2-application.tar / -CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \ +CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \ /uc2-application/bin/uc2-application \ No newline at end of file diff --git a/benchmarks/uc2-application/build.gradle 
b/benchmarks/uc2-application/build.gradle index ea3d8779a0cd5406808df190d623d1508a143b9d..e4d3f5346e401def9c9a5a49820d0682eafb0ad3 100644 --- a/benchmarks/uc2-application/build.gradle +++ b/benchmarks/uc2-application/build.gradle @@ -1 +1 @@ -mainClassName = "theodolite.uc2.application.AggregationService" +mainClassName = "theodolite.uc2.application.HistoryService" diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/application/HistoryService.java similarity index 64% rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/application/HistoryService.java index 12f35e8dcc532b19e470722094ba5aff07420ad2..1aa28400cc9d55c77518a880d8cc2f48a2823a6b 100644 --- a/benchmarks/uc4-application/src/main/java/theodolite/uc4/application/HistoryService.java +++ b/benchmarks/uc2-application/src/main/java/theodolite/uc2/application/HistoryService.java @@ -1,11 +1,12 @@ -package theodolite.uc4.application; +package theodolite.uc2.application; import java.time.Duration; +import java.util.Objects; import java.util.concurrent.CompletableFuture; import org.apache.commons.configuration2.Configuration; import org.apache.kafka.streams.KafkaStreams; import theodolite.commons.kafkastreams.ConfigurationKeys; -import theodolite.uc4.streamprocessing.Uc4KafkaStreamsBuilder; +import theodolite.uc2.streamprocessing.Uc2KafkaStreamsBuilder; import titan.ccp.common.configuration.ServiceConfigurations; /** @@ -18,6 +19,8 @@ public class HistoryService { private final Configuration config = ServiceConfigurations.createWithDefaults(); private final CompletableFuture<Void> stopEvent = new CompletableFuture<>(); + private final int windowDurationMinutes = Integer + .parseInt(Objects.requireNonNullElse(System.getenv("KAFKA_WINDOW_DURATION_MINUTES"), "60")); /** * Start the service. 
@@ -31,17 +34,12 @@ public class HistoryService { * */ private void createKafkaStreamsApplication() { - // Use case specific stream configuration - final Uc4KafkaStreamsBuilder uc4KafkaStreamsBuilder = new Uc4KafkaStreamsBuilder(this.config); - uc4KafkaStreamsBuilder + final Uc2KafkaStreamsBuilder uc2KafkaStreamsBuilder = new Uc2KafkaStreamsBuilder(this.config); + uc2KafkaStreamsBuilder .outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC)) - .aggregtionDuration( - Duration.ofDays(this.config.getInt(ConfigurationKeys.AGGREGATION_DURATION_DAYS))) - .aggregationAdvance( - Duration.ofDays(this.config.getInt(ConfigurationKeys.AGGREGATION_ADVANCE_DAYS))); + .windowDuration(Duration.ofMinutes(this.windowDurationMinutes)); - // Configuration of the stream application - final KafkaStreams kafkaStreams = uc4KafkaStreamsBuilder.build(); + final KafkaStreams kafkaStreams = uc2KafkaStreamsBuilder.build(); this.stopEvent.thenRun(kafkaStreams::close); kafkaStreams.start(); diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java index 74e9bb99b80efec4c27d7eb50668d622a5d951f9..eda7c495a2cff6d58b62a8a6a74ea8e1b2d89aca 100644 --- a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java +++ b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/TopologyBuilder.java @@ -1,206 +1,74 @@ package theodolite.uc2.streamprocessing; +import com.google.common.math.Stats; import java.time.Duration; import java.util.Properties; -import java.util.Set; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.StreamsBuilder; import org.apache.kafka.streams.Topology; import org.apache.kafka.streams.kstream.Consumed; -import org.apache.kafka.streams.kstream.Grouped; -import 
org.apache.kafka.streams.kstream.KStream; -import org.apache.kafka.streams.kstream.KTable; import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.kstream.Produced; -import org.apache.kafka.streams.kstream.Suppressed; -import org.apache.kafka.streams.kstream.Suppressed.BufferConfig; import org.apache.kafka.streams.kstream.TimeWindows; -import org.apache.kafka.streams.kstream.Windowed; -import org.apache.kafka.streams.kstream.WindowedSerdes; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import theodolite.uc2.streamprocessing.util.StatsFactory; +import titan.ccp.common.kafka.GenericSerde; import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory; -import titan.ccp.configuration.events.Event; -import titan.ccp.configuration.events.EventSerde; import titan.ccp.model.records.ActivePowerRecord; -import titan.ccp.model.records.AggregatedActivePowerRecord; -import titan.ccp.model.sensorregistry.SensorRegistry; /** * Builds Kafka Stream Topology for the History microservice. */ public class TopologyBuilder { - // Streams Variables + + private static final Logger LOGGER = LoggerFactory.getLogger(TopologyBuilder.class); + private final String inputTopic; - private final String feedbackTopic; private final String outputTopic; - private final String configurationTopic; - private final Duration emitPeriod; - private final Duration gracePeriod; - - // Serdes private final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory; + private final Duration duration; private final StreamsBuilder builder = new StreamsBuilder(); - private final RecordAggregator recordAggregator = new RecordAggregator(); /** * Create a new {@link TopologyBuilder} using the given topics. - * - * @param inputTopic The topic where to read sensor measurements from. - * @param configurationTopic The topic where the hierarchy of the sensors is published. - * @param feedbackTopic The topic where aggregation results are written to for feedback. 
- * @param outputTopic The topic where to publish aggregation results. - * @param emitPeriod The Duration results are emitted with. - * @param gracePeriod The Duration for how long late arriving records are considered. - * @param srAvroSerdeFactory Factory for creating avro SERDEs - * */ public TopologyBuilder(final String inputTopic, final String outputTopic, - final String feedbackTopic, final String configurationTopic, - final Duration emitPeriod, final Duration gracePeriod, - final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory) { + final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory, + final Duration duration) { this.inputTopic = inputTopic; this.outputTopic = outputTopic; - this.feedbackTopic = feedbackTopic; - this.configurationTopic = configurationTopic; - this.emitPeriod = emitPeriod; - this.gracePeriod = gracePeriod; - this.srAvroSerdeFactory = srAvroSerdeFactory; + this.duration = duration; } /** - * Build the {@link Topology} for the Aggregation microservice. + * Build the {@link Topology} for the History microservice. */ public Topology build(final Properties properties) { - // 1. Build Parent-Sensor Table - final KTable<String, Set<String>> parentSensorTable = this.buildParentSensorTable(); - - // 2. Build Input Table - final KTable<String, ActivePowerRecord> inputTable = this.buildInputTable(); - - // 3. Build Last Value Table from Input and Parent-Sensor Table - final KTable<Windowed<SensorParentKey>, ActivePowerRecord> lastValueTable = - this.buildLastValueTable(parentSensorTable, inputTable); - - // 4. Build Aggregations Stream - final KTable<Windowed<String>, AggregatedActivePowerRecord> aggregations = - this.buildAggregationStream(lastValueTable); - - // 6. Expose Feedback Stream - this.exposeFeedbackStream(aggregations); - - // 5. 
Expose Aggregations Stream - this.exposeOutputStream(aggregations); - - return this.builder.build(properties); - } - - private KTable<String, ActivePowerRecord> buildInputTable() { - final KStream<String, ActivePowerRecord> values = this.builder - .stream(this.inputTopic, Consumed.with( - Serdes.String(), - this.srAvroSerdeFactory.forValues())); - - final KStream<String, ActivePowerRecord> aggregationsInput = this.builder - .stream(this.feedbackTopic, Consumed.with( - Serdes.String(), - this.srAvroSerdeFactory.<AggregatedActivePowerRecord>forValues())) - .mapValues(r -> new ActivePowerRecord(r.getIdentifier(), r.getTimestamp(), r.getSumInW())); - - final KTable<String, ActivePowerRecord> inputTable = values - .merge(aggregationsInput) - .groupByKey(Grouped.with( - Serdes.String(), - this.srAvroSerdeFactory.forValues())) - .reduce((aggr, value) -> value, Materialized.with( - Serdes.String(), - this.srAvroSerdeFactory.forValues())); - return inputTable; - } - - private KTable<String, Set<String>> buildParentSensorTable() { - final KStream<Event, String> configurationStream = this.builder - .stream(this.configurationTopic, Consumed.with(EventSerde.serde(), Serdes.String())) - .filter((key, value) -> key == Event.SENSOR_REGISTRY_CHANGED - || key == Event.SENSOR_REGISTRY_STATUS); - - return configurationStream - .mapValues(data -> SensorRegistry.fromJson(data)) - .flatTransform(new ChildParentsTransformerSupplier()) - .groupByKey(Grouped.with(Serdes.String(), OptionalParentsSerde.serde())) - .aggregate( - () -> Set.<String>of(), - (key, newValue, oldValue) -> newValue.orElse(null), - Materialized.with(Serdes.String(), ParentsSerde.serde())); - } - - private KTable<Windowed<SensorParentKey>, ActivePowerRecord> buildLastValueTable( - final KTable<String, Set<String>> parentSensorTable, - final KTable<String, ActivePowerRecord> inputTable) { - - return inputTable - .join(parentSensorTable, (record, parents) -> new JointRecordParents(parents, record)) - .toStream() - 
.flatTransform(new JointFlatTransformerSupplier()) - .groupByKey(Grouped.with( - SensorParentKeySerde.serde(), - this.srAvroSerdeFactory.forValues())) - .windowedBy(TimeWindows.of(this.emitPeriod).grace(this.gracePeriod)) - .reduce( - // TODO Configurable window aggregation function - (oldVal, newVal) -> newVal.getTimestamp() >= oldVal.getTimestamp() ? newVal : oldVal, - Materialized.with( - SensorParentKeySerde.serde(), - this.srAvroSerdeFactory.forValues())); - } - - private KTable<Windowed<String>, AggregatedActivePowerRecord> buildAggregationStream( - final KTable<Windowed<SensorParentKey>, ActivePowerRecord> lastValueTable) { - return lastValueTable - .groupBy( - (k, v) -> KeyValue.pair(new Windowed<>(k.key().getParent(), k.window()), v), - Grouped.with( - new WindowedSerdes.TimeWindowedSerde<>( - Serdes.String(), - this.emitPeriod.toMillis()), - this.srAvroSerdeFactory.forValues())) + this.builder + .stream(this.inputTopic, + Consumed.with(Serdes.String(), + this.srAvroSerdeFactory.<ActivePowerRecord>forValues())) + .groupByKey() + .windowedBy(TimeWindows.of(this.duration)) + // .aggregate( + // () -> 0.0, + // (key, activePowerRecord, agg) -> agg + activePowerRecord.getValueInW(), + // Materialized.with(Serdes.String(), Serdes.Double())) .aggregate( - () -> null, - this.recordAggregator::add, - this.recordAggregator::substract, + () -> Stats.of(), + (k, record, stats) -> StatsFactory.accumulate(stats, record.getValueInW()), Materialized.with( - new WindowedSerdes.TimeWindowedSerde<>( - Serdes.String(), - this.emitPeriod.toMillis()), - this.srAvroSerdeFactory.forValues())) - // TODO timestamp -1 indicates that this record is emitted by an substract event - .filter((k, record) -> record.getTimestamp() != -1); - } - - private void exposeFeedbackStream( - final KTable<Windowed<String>, AggregatedActivePowerRecord> aggregations) { - - aggregations + Serdes.String(), + GenericSerde.from(Stats::toByteArray, Stats::fromByteArray))) .toStream() - .filter((k, record) 
-> record != null) - .selectKey((k, v) -> k.key()) - .to(this.feedbackTopic, Produced.with( - Serdes.String(), - this.srAvroSerdeFactory.forValues())); - } - - private void exposeOutputStream( - final KTable<Windowed<String>, AggregatedActivePowerRecord> aggregations) { + .map((k, s) -> KeyValue.pair(k.key(), s.toString())) + .peek((k, v) -> LOGGER.info(k + ": " + v)) + .to(this.outputTopic, Produced.with(Serdes.String(), Serdes.String())); - aggregations - // .suppress(Suppressed.untilWindowCloses(BufferConfig.unbounded())) - .suppress(Suppressed.untilTimeLimit(this.emitPeriod, BufferConfig.unbounded())) - .toStream() - .filter((k, record) -> record != null) - .selectKey((k, v) -> k.key()) - .to(this.outputTopic, Produced.with( - Serdes.String(), - this.srAvroSerdeFactory.forValues())); + return this.builder.build(properties); } } diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java index 7e077b101c0e1bfab359fc347ffe8c4acc9b88fc..1d6019f27cb78f6643e111095edbbdd9f6c03e1b 100644 --- a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java +++ b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/Uc2KafkaStreamsBuilder.java @@ -11,62 +11,33 @@ import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory; /** * Builder for the Kafka Streams configuration. 
*/ -public class Uc2KafkaStreamsBuilder extends KafkaStreamsBuilder { // NOPMD builder method +public class Uc2KafkaStreamsBuilder extends KafkaStreamsBuilder { - private static final Duration EMIT_PERIOD_DEFAULT = Duration.ofSeconds(1); - private static final Duration GRACE_PERIOD_DEFAULT = Duration.ZERO; - - private String feedbackTopic; // NOPMD private String outputTopic; // NOPMD - private String configurationTopic; // NOPMD - private Duration emitPeriod; // NOPMD - private Duration gracePeriod; // NOPMD + private Duration windowDuration; // NOPMD public Uc2KafkaStreamsBuilder(final Configuration config) { super(config); } - public Uc2KafkaStreamsBuilder feedbackTopic(final String feedbackTopic) { - this.feedbackTopic = feedbackTopic; - return this; - } - public Uc2KafkaStreamsBuilder outputTopic(final String outputTopic) { this.outputTopic = outputTopic; return this; } - public Uc2KafkaStreamsBuilder configurationTopic(final String configurationTopic) { - this.configurationTopic = configurationTopic; - return this; - } - - public Uc2KafkaStreamsBuilder emitPeriod(final Duration emitPeriod) { - this.emitPeriod = Objects.requireNonNull(emitPeriod); - return this; - } - - public Uc2KafkaStreamsBuilder gracePeriod(final Duration gracePeriod) { - this.gracePeriod = Objects.requireNonNull(gracePeriod); + public Uc2KafkaStreamsBuilder windowDuration(final Duration windowDuration) { + this.windowDuration = windowDuration; return this; } @Override protected Topology buildTopology(final Properties properties) { Objects.requireNonNull(this.inputTopic, "Input topic has not been set."); - Objects.requireNonNull(this.feedbackTopic, "Feedback topic has not been set."); Objects.requireNonNull(this.outputTopic, "Output topic has not been set."); - Objects.requireNonNull(this.configurationTopic, "Configuration topic has not been set."); - - final TopologyBuilder topologyBuilder = new TopologyBuilder( - this.inputTopic, - this.outputTopic, - this.feedbackTopic, - 
this.configurationTopic, - this.emitPeriod == null ? EMIT_PERIOD_DEFAULT : this.emitPeriod, - this.gracePeriod == null ? GRACE_PERIOD_DEFAULT : this.gracePeriod, - new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl)); + Objects.requireNonNull(this.windowDuration, "Window duration has not been set."); + final TopologyBuilder topologyBuilder = new TopologyBuilder(this.inputTopic, this.outputTopic, + new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl), this.windowDuration); return topologyBuilder.build(properties); } diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/util/StatsFactory.java similarity index 91% rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java rename to benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/util/StatsFactory.java index e97fbcd216c57a8aa965ee7a295c5633fa34810e..e4aff4fc80cea24c20be537f6aa5cda7c2be909a 100644 --- a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/util/StatsFactory.java +++ b/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/util/StatsFactory.java @@ -1,4 +1,4 @@ -package theodolite.uc4.streamprocessing.util; +package theodolite.uc2.streamprocessing.util; import com.google.common.math.Stats; import com.google.common.math.StatsAccumulator; diff --git a/benchmarks/uc2-application/src/main/resources/META-INF/application.properties b/benchmarks/uc2-application/src/main/resources/META-INF/application.properties index 8f1af5f590eff7f2b12706d61a7c89d9152f7949..15293b1387b96688401bbc48bc2d1615c7b63aba 100644 --- a/benchmarks/uc2-application/src/main/resources/META-INF/application.properties +++ b/benchmarks/uc2-application/src/main/resources/META-INF/application.properties @@ -3,11 +3,7 @@ application.version=0.0.1 kafka.bootstrap.servers=localhost:9092 
kafka.input.topic=input -kafka.configuration.topic=configuration -kafka.feedback.topic=aggregation-feedback kafka.output.topic=output +kafka.window.duration.minutes=1 schema.registry.url=http://localhost:8091 - -emit.period.ms=5000 -grace.period.ms=0 \ No newline at end of file diff --git a/benchmarks/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs index 4e04e2891754324a6e1bf55348b6a38f592bb301..fa98ca63d77bdee891150bd6713f70197a75cefc 100644 --- a/benchmarks/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs +++ b/benchmarks/uc2-workload-generator/.settings/org.eclipse.jdt.ui.prefs @@ -32,7 +32,7 @@ cleanup.qualify_static_member_accesses_with_declaring_class=true cleanup.qualify_static_method_accesses_with_declaring_class=false cleanup.remove_private_constructors=true cleanup.remove_redundant_modifiers=false -cleanup.remove_redundant_semicolons=false +cleanup.remove_redundant_semicolons=true cleanup.remove_redundant_type_arguments=true cleanup.remove_trailing_whitespaces=true cleanup.remove_trailing_whitespaces_all=true diff --git a/benchmarks/uc2-workload-generator/Dockerfile b/benchmarks/uc2-workload-generator/Dockerfile index 162243e055732de84d1680dba609425f4068dbc2..55593e0295efb0c4f7d4c484b1b104c256f9b958 100644 --- a/benchmarks/uc2-workload-generator/Dockerfile +++ b/benchmarks/uc2-workload-generator/Dockerfile @@ -1,6 +1,6 @@ -FROM openjdk:11-slim - -ADD build/distributions/uc2-workload-generator.tar / - -CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \ +FROM openjdk:11-slim + +ADD build/distributions/uc2-workload-generator.tar / + +CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \ /uc2-workload-generator/bin/uc2-workload-generator \ No newline at end of file diff --git a/benchmarks/uc2-workload-generator/build.gradle b/benchmarks/uc2-workload-generator/build.gradle index 
b92e0c2edc54786ea957338b9981922f0a6a7b32..f2c3e5d2e73b655dffd94222ecfbc4fc31b7f722 100644 --- a/benchmarks/uc2-workload-generator/build.gradle +++ b/benchmarks/uc2-workload-generator/build.gradle @@ -1 +1 @@ -mainClassName = "theodolite.uc2.workloadgenerator.LoadGenerator" +mainClassName = "theodolite.uc2.workloadgenerator.LoadGenerator" diff --git a/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java b/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java index 3eb3e8d25b1f1aa6f302673727b8457a744fb503..2c5b59bc19f703c4216bc02920b62bcf9da5d5fb 100644 --- a/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java +++ b/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/LoadGenerator.java @@ -1,139 +1,19 @@ package theodolite.uc2.workloadgenerator; -import java.io.IOException; -import java.time.Duration; -import java.time.temporal.ChronoUnit; -import java.util.Objects; -import java.util.Properties; -import org.apache.kafka.clients.producer.ProducerConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender; -import theodolite.commons.workloadgeneration.dimensions.KeySpace; -import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGenerator; -import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGeneratorBuilder; -import theodolite.commons.workloadgeneration.misc.ZooKeeper; -import titan.ccp.configuration.events.Event; -import titan.ccp.model.records.ActivePowerRecord; -import titan.ccp.model.sensorregistry.SensorRegistry; /** - * The {@code LoadGenerator} creates a load in Kafka. + * Load generator for Theodolite use case UC2. 
*/ public final class LoadGenerator { - private static final int SLEEP_PERIOD = 30_000; - private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class); - // Constants - private static final String DEEP = "deep"; - private static final long MAX_DURATION_IN_DAYS = 30L; + private LoadGenerator() {} - // Make this a utility class, because all methods are static. - private LoadGenerator() { - throw new UnsupportedOperationException(); + public static void main(final String[] args) { + LOGGER.info("Start workload generator for use case UC2"); + theodolite.commons.workloadgeneration.LoadGenerator.fromEnvironment().run(); } - - /** - * Main method. - * - * @param args CLI arguments - * @throws InterruptedException Interrupt happened - * @throws IOException happened. - */ - public static void main(final String[] args) throws InterruptedException, IOException { - // uc2 - LOGGER.info("Start workload generator for use case UC2."); - - // get environment variables - final String hierarchy = System.getenv("HIERARCHY"); - if (hierarchy != null && hierarchy.equals(DEEP)) { - LOGGER.error( - "The HIERARCHY parameter is no longer supported. 
Creating a full hierachy instead."); - } - final int numNestedGroups = Integer - .parseInt(Objects.requireNonNullElse(System.getenv("NUM_NESTED_GROUPS"), "1")); - final String zooKeeperHost = Objects.requireNonNullElse(System.getenv("ZK_HOST"), "localhost"); - final int zooKeeperPort = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("ZK_PORT"), "2181")); - final int numSensors = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "1")); - final int periodMs = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000")); - final double value = - Double.parseDouble(Objects.requireNonNullElse(System.getenv("VALUE"), "10")); - final boolean sendRegistry = Boolean - .parseBoolean(Objects.requireNonNullElse(System.getenv("SEND_REGISTRY"), "true")); - final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4")); - final String kafkaBootstrapServers = - Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), - "localhost:9092"); - final String schemaRegistryUrl = - Objects.requireNonNullElse(System.getenv("SCHEMA_REGISTRY_URL"), "http://localhost:8091"); - final String kafkaInputTopic = - Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input"); - final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE"); - final String kafkaLingerMs = System.getenv("KAFKA_LINGER_MS"); - final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY"); - final int instances = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("INSTANCES"), "1")); - - // build sensor registry - final SensorRegistry sensorRegistry = - new SensorRegistryBuilder(numNestedGroups, numSensors).build(); - - // create kafka record sender - final Properties kafkaProperties = new Properties(); - // kafkaProperties.put("acks", this.acknowledges); - kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize); - 
kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs); - kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory); - - final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = - new KafkaRecordSender.Builder<ActivePowerRecord>( - kafkaBootstrapServers, - kafkaInputTopic, - schemaRegistryUrl) - .keyAccessor(r -> r.getIdentifier()) - .timestampAccessor(r -> r.getTimestamp()) - .defaultProperties(kafkaProperties) - .build(); - - // create workload generator - final KafkaWorkloadGenerator<ActivePowerRecord> workloadGenerator = - KafkaWorkloadGeneratorBuilder.<ActivePowerRecord>builder() - .instances(instances) - .keySpace(new KeySpace("s_", sensorRegistry.getMachineSensors().size())) - .threads(threads) - .period(Duration.of(periodMs, ChronoUnit.MILLIS)) - .duration(Duration.of(MAX_DURATION_IN_DAYS, ChronoUnit.DAYS)) - .beforeAction(() -> { - if (sendRegistry) { - final ConfigPublisher configPublisher = - new ConfigPublisher(kafkaBootstrapServers, "configuration"); - configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson()); - configPublisher.close(); - LOGGER.info("Configuration sent."); - - LOGGER.info("Now wait 30 seconds"); - try { - Thread.sleep(SLEEP_PERIOD); - } catch (final InterruptedException e) { - // TODO Auto-generated catch block - LOGGER.error(e.getMessage(), e); - } - LOGGER.info("And woke up again :)"); - } - }) - .generatorFunction( - sensor -> new ActivePowerRecord(sensor, System.currentTimeMillis(), value)) - .zooKeeper(new ZooKeeper(zooKeeperHost, zooKeeperPort)) - .kafkaRecordSender(kafkaRecordSender) - .build(); - - // start - workloadGenerator.start(); - } - } diff --git a/benchmarks/uc3-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc3-application/.settings/org.eclipse.jdt.ui.prefs index 4e04e2891754324a6e1bf55348b6a38f592bb301..fa98ca63d77bdee891150bd6713f70197a75cefc 100644 --- a/benchmarks/uc3-application/.settings/org.eclipse.jdt.ui.prefs 
+++ b/benchmarks/uc3-application/.settings/org.eclipse.jdt.ui.prefs @@ -32,7 +32,7 @@ cleanup.qualify_static_member_accesses_with_declaring_class=true cleanup.qualify_static_method_accesses_with_declaring_class=false cleanup.remove_private_constructors=true cleanup.remove_redundant_modifiers=false -cleanup.remove_redundant_semicolons=false +cleanup.remove_redundant_semicolons=true cleanup.remove_redundant_type_arguments=true cleanup.remove_trailing_whitespaces=true cleanup.remove_trailing_whitespaces_all=true diff --git a/benchmarks/uc3-application/Dockerfile b/benchmarks/uc3-application/Dockerfile index c70a24268e114e924b5f06dc7a8979100f5d8455..61141baaf752af4b596c8a04cd0d7cc2e6d740af 100644 --- a/benchmarks/uc3-application/Dockerfile +++ b/benchmarks/uc3-application/Dockerfile @@ -1,8 +1,6 @@ FROM openjdk:11-slim - ADD build/distributions/uc3-application.tar / - CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \ - /uc3-application/bin/uc3-application \ No newline at end of file + /uc3-application/bin/uc3-application diff --git a/benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java index 349512f988bb182d8851e458a1bce244c756bbfe..84fb29969d2ce37a1d443752790379b1af634df5 100644 --- a/benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java +++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/application/HistoryService.java @@ -1,7 +1,6 @@ package theodolite.uc3.application; import java.time.Duration; -import java.util.Objects; import java.util.concurrent.CompletableFuture; import org.apache.commons.configuration2.Configuration; import org.apache.kafka.streams.KafkaStreams; @@ -19,8 +18,6 @@ public class HistoryService { private final Configuration config = ServiceConfigurations.createWithDefaults(); private final CompletableFuture<Void> stopEvent = new CompletableFuture<>(); 
- private final int windowDurationMinutes = Integer - .parseInt(Objects.requireNonNullElse(System.getenv("KAFKA_WINDOW_DURATION_MINUTES"), "60")); /** * Start the service. @@ -34,11 +31,16 @@ public class HistoryService { * */ private void createKafkaStreamsApplication() { + // Use case specific stream configuration final Uc3KafkaStreamsBuilder uc3KafkaStreamsBuilder = new Uc3KafkaStreamsBuilder(this.config); uc3KafkaStreamsBuilder .outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC)) - .windowDuration(Duration.ofMinutes(this.windowDurationMinutes)); + .aggregtionDuration( + Duration.ofDays(this.config.getInt(ConfigurationKeys.AGGREGATION_DURATION_DAYS))) + .aggregationAdvance( + Duration.ofDays(this.config.getInt(ConfigurationKeys.AGGREGATION_ADVANCE_DAYS))); + // Configuration of the stream application final KafkaStreams kafkaStreams = uc3KafkaStreamsBuilder.build(); this.stopEvent.thenRun(kafkaStreams::close); diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKey.java similarity index 96% rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKey.java index 97807e3bdecf4000cc2edeed364b8f9d1bc9bb8e..549674f9f546a26d38491195edc2139aeadd785b 100644 --- a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKey.java +++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKey.java @@ -1,4 +1,4 @@ -package theodolite.uc4.streamprocessing; +package theodolite.uc3.streamprocessing; import java.util.Objects; diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKeyFactory.java 
similarity index 92% rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKeyFactory.java index edb9ad2b20ac645dfade840130e1be67d2505304..837ca9d32e1a353917adcd3f70eb1af51d801613 100644 --- a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeyFactory.java +++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKeyFactory.java @@ -1,4 +1,4 @@ -package theodolite.uc4.streamprocessing; +package theodolite.uc3.streamprocessing; import java.time.LocalDateTime; diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKeySerde.java similarity index 96% rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKeySerde.java index ff404ab121ca2e60da65f11d89b8ec5849bd600d..6855907e7f357d681c3bd9a6054bf15ad29711ed 100644 --- a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayKeySerde.java +++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayKeySerde.java @@ -1,4 +1,4 @@ -package theodolite.uc4.streamprocessing; +package theodolite.uc3.streamprocessing; import org.apache.kafka.common.serialization.Serde; import titan.ccp.common.kafka.simpleserdes.BufferSerde; diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayRecordFactory.java similarity index 95% rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java rename to 
benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayRecordFactory.java index 7249309cea036bff9203ce9a7aa32489f69edebe..dfa9b95b08b95bf29621969c56a1e76cdcfc7877 100644 --- a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/HourOfDayRecordFactory.java +++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/HourOfDayRecordFactory.java @@ -1,4 +1,4 @@ -package theodolite.uc4.streamprocessing; +package theodolite.uc3.streamprocessing; import com.google.common.math.Stats; import org.apache.kafka.streams.kstream.Windowed; diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/RecordDatabaseAdapter.java similarity index 98% rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/RecordDatabaseAdapter.java index 8f693d5d3d309eb73a017b8d33dfcd63e70724fb..342cb3e04cd632fc4e8129de0bad6f12e8119dfa 100644 --- a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordDatabaseAdapter.java +++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/RecordDatabaseAdapter.java @@ -1,4 +1,4 @@ -package theodolite.uc4.streamprocessing; +package theodolite.uc3.streamprocessing; import java.util.Collection; import java.util.List; diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/StatsKeyFactory.java similarity index 88% rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/StatsKeyFactory.java index 
cf67efbd34362c337a956d80f14731cf9b9d6b77..0e414c4a13f1cf7df1da5f0026b6de82e1c1c6ce 100644 --- a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsKeyFactory.java +++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/StatsKeyFactory.java @@ -1,4 +1,4 @@ -package theodolite.uc4.streamprocessing; +package theodolite.uc3.streamprocessing; import java.time.LocalDateTime; diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/StatsRecordFactory.java similarity index 94% rename from benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java rename to benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/StatsRecordFactory.java index 79eb4b9f76e4429cf84d0af0e56875ea0386e218..31935df9db0949b05e602109b3edc23dee9499af 100644 --- a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/StatsRecordFactory.java +++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/StatsRecordFactory.java @@ -1,4 +1,4 @@ -package theodolite.uc4.streamprocessing; +package theodolite.uc3.streamprocessing; import com.google.common.math.Stats; import org.apache.avro.specific.SpecificRecord; diff --git a/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java index d6d6d4ffb7ebb1236be73dd681c900311853e732..1e976c07158720b3681d89413a5f277b1395f32d 100644 --- a/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java +++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/TopologyBuilder.java @@ -2,17 +2,20 @@ package theodolite.uc3.streamprocessing; import com.google.common.math.Stats; import java.time.Duration; +import 
java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; import java.util.Properties; +import org.apache.kafka.common.serialization.Serde; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.StreamsBuilder; import org.apache.kafka.streams.Topology; import org.apache.kafka.streams.kstream.Consumed; +import org.apache.kafka.streams.kstream.Grouped; import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.kstream.Produced; import org.apache.kafka.streams.kstream.TimeWindows; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import theodolite.uc3.streamprocessing.util.StatsFactory; import titan.ccp.common.kafka.GenericSerde; import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory; @@ -23,12 +26,16 @@ import titan.ccp.model.records.ActivePowerRecord; */ public class TopologyBuilder { - private static final Logger LOGGER = LoggerFactory.getLogger(TopologyBuilder.class); + // private static final Logger LOGGER = LoggerFactory.getLogger(TopologyBuilder.class); + + private final ZoneId zone = ZoneId.of("Europe/Paris"); // TODO as parameter + private final String inputTopic; private final String outputTopic; private final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory; - private final Duration duration; + private final Duration aggregtionDuration; + private final Duration aggregationAdvance; private final StreamsBuilder builder = new StreamsBuilder(); @@ -37,37 +44,51 @@ public class TopologyBuilder { */ public TopologyBuilder(final String inputTopic, final String outputTopic, final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory, - final Duration duration) { + final Duration aggregtionDuration, final Duration aggregationAdvance) { this.inputTopic = inputTopic; this.outputTopic = outputTopic; this.srAvroSerdeFactory = srAvroSerdeFactory; - this.duration = duration; + this.aggregtionDuration = aggregtionDuration; + 
this.aggregationAdvance = aggregationAdvance; } /** * Build the {@link Topology} for the History microservice. */ public Topology build(final Properties properties) { + final StatsKeyFactory<HourOfDayKey> keyFactory = new HourOfDayKeyFactory(); + final Serde<HourOfDayKey> keySerde = HourOfDayKeySerde.create(); + this.builder .stream(this.inputTopic, Consumed.with(Serdes.String(), this.srAvroSerdeFactory.<ActivePowerRecord>forValues())) - .groupByKey() - .windowedBy(TimeWindows.of(this.duration)) - // .aggregate( - // () -> 0.0, - // (key, activePowerRecord, agg) -> agg + activePowerRecord.getValueInW(), - // Materialized.with(Serdes.String(), Serdes.Double())) + .selectKey((key, value) -> { + final Instant instant = Instant.ofEpochMilli(value.getTimestamp()); + final LocalDateTime dateTime = LocalDateTime.ofInstant(instant, this.zone); + return keyFactory.createKey(value.getIdentifier(), dateTime); + }) + .groupByKey( + Grouped.with(keySerde, this.srAvroSerdeFactory.forValues())) + .windowedBy(TimeWindows.of(this.aggregtionDuration).advanceBy(this.aggregationAdvance)) .aggregate( () -> Stats.of(), (k, record, stats) -> StatsFactory.accumulate(stats, record.getValueInW()), - Materialized.with( - Serdes.String(), + Materialized.with(keySerde, GenericSerde.from(Stats::toByteArray, Stats::fromByteArray))) .toStream() - .map((k, s) -> KeyValue.pair(k.key(), s.toString())) - .peek((k, v) -> LOGGER.info(k + ": " + v)) - .to(this.outputTopic, Produced.with(Serdes.String(), Serdes.String())); + .map((key, stats) -> KeyValue.pair( + keyFactory.getSensorId(key.key()), + stats.toString())) + // TODO + // statsRecordFactory.create(key, value))) + // .peek((k, v) -> LOGGER.info("{}: {}", k, v)) // TODO Temp logging + .to( + this.outputTopic, + Produced.with( + Serdes.String(), + Serdes.String())); + // this.serdes.avroValues())); return this.builder.build(properties); } diff --git 
a/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java index 70113271a9d3c23499b85c07bf9d0a76db59f820..ea9b064602b1aa7cf7350826da18990ae3191d43 100644 --- a/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java +++ b/benchmarks/uc3-application/src/main/java/theodolite/uc3/streamprocessing/Uc3KafkaStreamsBuilder.java @@ -14,7 +14,8 @@ import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory; public class Uc3KafkaStreamsBuilder extends KafkaStreamsBuilder { private String outputTopic; // NOPMD - private Duration windowDuration; // NOPMD + private Duration aggregtionDuration; // NOPMD + private Duration aggregationAdvance; // NOPMD public Uc3KafkaStreamsBuilder(final Configuration config) { super(config); @@ -25,8 +26,13 @@ public class Uc3KafkaStreamsBuilder extends KafkaStreamsBuilder { return this; } - public Uc3KafkaStreamsBuilder windowDuration(final Duration windowDuration) { - this.windowDuration = windowDuration; + public Uc3KafkaStreamsBuilder aggregtionDuration(final Duration aggregtionDuration) { + this.aggregtionDuration = aggregtionDuration; + return this; + } + + public Uc3KafkaStreamsBuilder aggregationAdvance(final Duration aggregationAdvance) { + this.aggregationAdvance = aggregationAdvance; return this; } @@ -34,10 +40,16 @@ public class Uc3KafkaStreamsBuilder extends KafkaStreamsBuilder { protected Topology buildTopology(final Properties properties) { Objects.requireNonNull(this.inputTopic, "Input topic has not been set."); Objects.requireNonNull(this.outputTopic, "Output topic has not been set."); - Objects.requireNonNull(this.windowDuration, "Window duration has not been set."); + Objects.requireNonNull(this.aggregtionDuration, "Aggregation duration has not been set."); + Objects.requireNonNull(this.aggregationAdvance, "Aggregation advance 
period has not been set."); + + final TopologyBuilder topologyBuilder = new TopologyBuilder( + this.inputTopic, + this.outputTopic, + new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl), + this.aggregtionDuration, + this.aggregationAdvance); - final TopologyBuilder topologyBuilder = new TopologyBuilder(this.inputTopic, this.outputTopic, - new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl), this.windowDuration); return topologyBuilder.build(properties); } diff --git a/benchmarks/uc3-application/src/main/resources/META-INF/application.properties b/benchmarks/uc3-application/src/main/resources/META-INF/application.properties index 011406f7ef1e23647eeae150d349f472214cbcd4..1273441a61763325c812541e1af8c243f81a31a5 100644 --- a/benchmarks/uc3-application/src/main/resources/META-INF/application.properties +++ b/benchmarks/uc3-application/src/main/resources/META-INF/application.properties @@ -4,6 +4,7 @@ application.version=0.0.1 kafka.bootstrap.servers=localhost:9092 kafka.input.topic=input kafka.output.topic=output -kafka.window.duration.minutes=1 +aggregation.duration.days=30 +aggregation.advance.days=1 schema.registry.url=http://localhost:8091 diff --git a/benchmarks/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs index 4e04e2891754324a6e1bf55348b6a38f592bb301..fa98ca63d77bdee891150bd6713f70197a75cefc 100644 --- a/benchmarks/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs +++ b/benchmarks/uc3-workload-generator/.settings/org.eclipse.jdt.ui.prefs @@ -32,7 +32,7 @@ cleanup.qualify_static_member_accesses_with_declaring_class=true cleanup.qualify_static_method_accesses_with_declaring_class=false cleanup.remove_private_constructors=true cleanup.remove_redundant_modifiers=false -cleanup.remove_redundant_semicolons=false +cleanup.remove_redundant_semicolons=true cleanup.remove_redundant_type_arguments=true cleanup.remove_trailing_whitespaces=true 
cleanup.remove_trailing_whitespaces_all=true diff --git a/benchmarks/uc3-workload-generator/Dockerfile b/benchmarks/uc3-workload-generator/Dockerfile index 6efd5ec6163815c467ef22e18f3d2cc1e0e3259a..8422c9d5371b86ced0a38c141c461aef452133ac 100644 --- a/benchmarks/uc3-workload-generator/Dockerfile +++ b/benchmarks/uc3-workload-generator/Dockerfile @@ -1,6 +1,6 @@ -FROM openjdk:11-slim - -ADD build/distributions/uc3-workload-generator.tar / - -CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \ - /uc3-workload-generator/bin/uc3-workload-generator \ No newline at end of file +FROM openjdk:11-slim + +ADD build/distributions/uc3-workload-generator.tar / + +CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \ + /uc3-workload-generator/bin/uc3-workload-generator diff --git a/benchmarks/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java b/benchmarks/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java index 85f6a94036c53b48973ba2200212fc8e5dfd663d..97527abfdd86f5ea39c20c3da31cd7cd26b674e5 100644 --- a/benchmarks/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java +++ b/benchmarks/uc3-workload-generator/src/main/java/theodolite/uc3/workloadgenerator/LoadGenerator.java @@ -1,102 +1,22 @@ package theodolite.uc3.workloadgenerator; -import java.io.IOException; -import java.time.Duration; -import java.time.temporal.ChronoUnit; -import java.util.Objects; -import java.util.Properties; -import org.apache.kafka.clients.producer.ProducerConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender; -import theodolite.commons.workloadgeneration.dimensions.KeySpace; -import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGenerator; -import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGeneratorBuilder; 
-import theodolite.commons.workloadgeneration.misc.ZooKeeper; -import titan.ccp.model.records.ActivePowerRecord; /** - * The {@code LoadGenerator} creates a load in Kafka. + * Load generator for Theodolite use case UC3. */ public final class LoadGenerator { private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class); - // constants - private static final long MAX_DURATION_IN_DAYS = 30L; - - // Make this a utility class, because all methods are static. private LoadGenerator() { throw new UnsupportedOperationException(); } - /** - * Main method. - * - * @param args CLI arguments - * @throws InterruptedException Interrupt happened - * @throws IOException happened. - */ - public static void main(final String[] args) throws InterruptedException, IOException { - // uc2 - LOGGER.info("Start workload generator for use case UC3."); - - // get environment variables - final String zooKeeperHost = Objects.requireNonNullElse(System.getenv("ZK_HOST"), "localhost"); - final int zooKeeperPort = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("ZK_PORT"), "2181")); - final int numSensors = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10")); - final int periodMs = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000")); - final double value = - Double.parseDouble(Objects.requireNonNullElse(System.getenv("VALUE"), "10")); - final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "4")); - final String kafkaBootstrapServers = - Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), - "localhost:9092"); - final String schemaRegistryUrl = - Objects.requireNonNullElse(System.getenv("SCHEMA_REGISTRY_URL"), "http://localhost:8091"); - final String kafkaInputTopic = - Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input"); - final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE"); - final String kafkaLingerMs = 
System.getenv("KAFKA_LINGER_MS"); - final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY"); - final int instances = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("INSTANCES"), "1")); - - // create kafka record sender - final Properties kafkaProperties = new Properties(); - // kafkaProperties.put("acks", this.acknowledges); - kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize); - kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs); - kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory); - final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = - new KafkaRecordSender.Builder<ActivePowerRecord>( - kafkaBootstrapServers, - kafkaInputTopic, - schemaRegistryUrl) - .keyAccessor(r -> r.getIdentifier()) - .timestampAccessor(r -> r.getTimestamp()) - .defaultProperties(kafkaProperties) - .build(); - - // create workload generator - final KafkaWorkloadGenerator<ActivePowerRecord> workloadGenerator = - KafkaWorkloadGeneratorBuilder.<ActivePowerRecord>builder() - .instances(instances) - .keySpace(new KeySpace("s_", numSensors)) - .threads(threads) - .period(Duration.of(periodMs, ChronoUnit.MILLIS)) - .duration(Duration.of(MAX_DURATION_IN_DAYS, ChronoUnit.DAYS)) - .generatorFunction( - sensor -> new ActivePowerRecord(sensor, System.currentTimeMillis(), value)) - .zooKeeper(new ZooKeeper(zooKeeperHost, zooKeeperPort)) - .kafkaRecordSender(kafkaRecordSender) - .build(); - - // start - workloadGenerator.start(); - + public static void main(final String[] args) { + LOGGER.info("Start workload generator for use case UC3"); + theodolite.commons.workloadgeneration.LoadGenerator.fromEnvironment().run(); } + } diff --git a/benchmarks/uc4-application/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc4-application/.settings/org.eclipse.jdt.ui.prefs index 4e04e2891754324a6e1bf55348b6a38f592bb301..fa98ca63d77bdee891150bd6713f70197a75cefc 100644 --- 
a/benchmarks/uc4-application/.settings/org.eclipse.jdt.ui.prefs +++ b/benchmarks/uc4-application/.settings/org.eclipse.jdt.ui.prefs @@ -32,7 +32,7 @@ cleanup.qualify_static_member_accesses_with_declaring_class=true cleanup.qualify_static_method_accesses_with_declaring_class=false cleanup.remove_private_constructors=true cleanup.remove_redundant_modifiers=false -cleanup.remove_redundant_semicolons=false +cleanup.remove_redundant_semicolons=true cleanup.remove_redundant_type_arguments=true cleanup.remove_trailing_whitespaces=true cleanup.remove_trailing_whitespaces_all=true diff --git a/benchmarks/uc4-application/Dockerfile b/benchmarks/uc4-application/Dockerfile index 8cb65188ab9885af0dc4e243319969626cb74d62..add251c0ef11324830bcada9174fbbdecc18d532 100644 --- a/benchmarks/uc4-application/Dockerfile +++ b/benchmarks/uc4-application/Dockerfile @@ -1,8 +1,6 @@ FROM openjdk:11-slim - ADD build/distributions/uc4-application.tar / - -CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \ +CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \ /uc4-application/bin/uc4-application diff --git a/benchmarks/uc2-application/README.md b/benchmarks/uc4-application/README.md similarity index 100% rename from benchmarks/uc2-application/README.md rename to benchmarks/uc4-application/README.md diff --git a/benchmarks/uc4-application/build.gradle b/benchmarks/uc4-application/build.gradle index 56663022144166711d6bebce0f6480e358a738b5..9cb1b311d8f50769d371952db886e4a00a454591 100644 --- a/benchmarks/uc4-application/build.gradle +++ b/benchmarks/uc4-application/build.gradle @@ -1 +1 @@ -mainClassName = "theodolite.uc4.application.HistoryService" +mainClassName = "theodolite.uc4.application.AggregationService" diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/application/AggregationService.java similarity index 85% rename 
from benchmarks/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/application/AggregationService.java index 2f828278f5a3033c3e479bf82f3c8c5d9d4c380c..5c9d0910e7fbc60e58b13fc838f7ef2407de2aa3 100644 --- a/benchmarks/uc2-application/src/main/java/theodolite/uc2/application/AggregationService.java +++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/application/AggregationService.java @@ -1,11 +1,11 @@ -package theodolite.uc2.application; +package theodolite.uc4.application; import java.time.Duration; import java.util.concurrent.CompletableFuture; import org.apache.commons.configuration2.Configuration; import org.apache.kafka.streams.KafkaStreams; import theodolite.commons.kafkastreams.ConfigurationKeys; -import theodolite.uc2.streamprocessing.Uc2KafkaStreamsBuilder; +import theodolite.uc4.streamprocessing.Uc4KafkaStreamsBuilder; import titan.ccp.common.configuration.ServiceConfigurations; /** @@ -36,15 +36,15 @@ public class AggregationService { * @param clusterSession the database session which the application should use. 
*/ private void createKafkaStreamsApplication() { - final Uc2KafkaStreamsBuilder uc2KafkaStreamsBuilder = new Uc2KafkaStreamsBuilder(this.config); - uc2KafkaStreamsBuilder + final Uc4KafkaStreamsBuilder uc4KafkaStreamsBuilder = new Uc4KafkaStreamsBuilder(this.config); + uc4KafkaStreamsBuilder .feedbackTopic(this.config.getString(ConfigurationKeys.KAFKA_FEEDBACK_TOPIC)) .outputTopic(this.config.getString(ConfigurationKeys.KAFKA_OUTPUT_TOPIC)) .configurationTopic(this.config.getString(ConfigurationKeys.KAFKA_CONFIGURATION_TOPIC)) .emitPeriod(Duration.ofMillis(this.config.getLong(ConfigurationKeys.EMIT_PERIOD_MS))) .gracePeriod(Duration.ofMillis(this.config.getLong(ConfigurationKeys.GRACE_PERIOD_MS))); - final KafkaStreams kafkaStreams = uc2KafkaStreamsBuilder.build(); + final KafkaStreams kafkaStreams = uc4KafkaStreamsBuilder.build(); this.stopEvent.thenRun(kafkaStreams::close); kafkaStreams.start(); diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ChildParentsTransformer.java similarity index 99% rename from benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ChildParentsTransformer.java index d4f9097ad0fa176842872e43f2f69a8616a65166..db28c86bce79caa4345a3a2bc7914c3e2bbd1a32 100644 --- a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformer.java +++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ChildParentsTransformer.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import java.util.Map; import java.util.Optional; diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java 
b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ChildParentsTransformerSupplier.java similarity index 97% rename from benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ChildParentsTransformerSupplier.java index 2b2d71c2f95d052cee19394e3e62e674776f8627..d17757d6800890eaf5260af9c25914344ca4a625 100644 --- a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ChildParentsTransformerSupplier.java +++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ChildParentsTransformerSupplier.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import java.util.Map; import java.util.Optional; diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointFlatTransformer.java similarity index 98% rename from benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointFlatTransformer.java index 724c7f6e2eaebc7be53f03b89d143d885c4a055c..d3500adff664cba8f3f92707a0adba34534404b7 100644 --- a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformer.java +++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointFlatTransformer.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import com.google.common.base.MoreObjects; import java.util.ArrayList; diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java 
b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointFlatTransformerSupplier.java similarity index 96% rename from benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointFlatTransformerSupplier.java index 7d9a7df3d465260623abef2b13e9f3765925bc57..51c7ce1f6cb144c88356ef1b32bdfce400e1ffb4 100644 --- a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointFlatTransformerSupplier.java +++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointFlatTransformerSupplier.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import java.util.Map; import java.util.Set; diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointRecordParents.java similarity index 96% rename from benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointRecordParents.java index cba05f1ed8e585d5c31aaa92207e0d2854436736..e9a5a824e43dfbab83151da5c2a8f18f9105f494 100644 --- a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/JointRecordParents.java +++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/JointRecordParents.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import java.util.Objects; import java.util.Set; diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/OptionalParentsSerde.java similarity index 97% rename from 
benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/OptionalParentsSerde.java index 5cb8f1ed8fcc1cecff1eefa4922531555a78c25f..a1e9767da047951e04d4c3914c2d1b36bd18626b 100644 --- a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/OptionalParentsSerde.java +++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/OptionalParentsSerde.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import java.util.HashSet; import java.util.Optional; diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ParentsSerde.java similarity index 96% rename from benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ParentsSerde.java index 266eaad015979a9e4ae748f7647ddcaf5947c78b..df6f848b5dfde10a96aceaf4d4a293364d52b982 100644 --- a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/ParentsSerde.java +++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/ParentsSerde.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import java.util.HashSet; import java.util.Set; diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordAggregator.java similarity index 97% rename from benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordAggregator.java index 
9564e994da8fc909147bec76097c737f14247868..34ef3762d6a3219958329762ce6e39844684068a 100644 --- a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/RecordAggregator.java +++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/RecordAggregator.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import org.apache.kafka.streams.kstream.Windowed; import titan.ccp.model.records.ActivePowerRecord; diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/SensorParentKey.java similarity index 96% rename from benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/SensorParentKey.java index a4fb5b33966882b94d46c96282bdaaed92d67ebd..667cc6d5ee83a41f7c04fc8074a18ef1a9422b0e 100644 --- a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKey.java +++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/SensorParentKey.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import java.util.Objects; diff --git a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/SensorParentKeySerde.java similarity index 95% rename from benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java rename to benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/SensorParentKeySerde.java index d6773c6159f1d04ddf1c3f36fd25447575befce8..63b9e44b5a7bde8f47fe7620b286aefa7fc60841 100644 --- 
a/benchmarks/uc2-application/src/main/java/theodolite/uc2/streamprocessing/SensorParentKeySerde.java +++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/SensorParentKeySerde.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import org.apache.kafka.common.serialization.Serde; import titan.ccp.common.kafka.simpleserdes.BufferSerde; diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java index a0c87ba4702b9c3f191291a3f04679cc73fcb04b..623870313cd341d0594fee38d2fd0ae297abbeae 100644 --- a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java +++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/TopologyBuilder.java @@ -1,95 +1,206 @@ package theodolite.uc4.streamprocessing; -import com.google.common.math.Stats; import java.time.Duration; -import java.time.Instant; -import java.time.LocalDateTime; -import java.time.ZoneId; import java.util.Properties; -import org.apache.kafka.common.serialization.Serde; +import java.util.Set; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.StreamsBuilder; import org.apache.kafka.streams.Topology; import org.apache.kafka.streams.kstream.Consumed; import org.apache.kafka.streams.kstream.Grouped; +import org.apache.kafka.streams.kstream.KStream; +import org.apache.kafka.streams.kstream.KTable; import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.kstream.Produced; +import org.apache.kafka.streams.kstream.Suppressed; +import org.apache.kafka.streams.kstream.Suppressed.BufferConfig; import org.apache.kafka.streams.kstream.TimeWindows; -import theodolite.uc4.streamprocessing.util.StatsFactory; -import titan.ccp.common.kafka.GenericSerde; +import 
org.apache.kafka.streams.kstream.Windowed; +import org.apache.kafka.streams.kstream.WindowedSerdes; import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory; +import titan.ccp.configuration.events.Event; +import titan.ccp.configuration.events.EventSerde; import titan.ccp.model.records.ActivePowerRecord; +import titan.ccp.model.records.AggregatedActivePowerRecord; +import titan.ccp.model.sensorregistry.SensorRegistry; /** * Builds Kafka Stream Topology for the History microservice. */ public class TopologyBuilder { - - // private static final Logger LOGGER = LoggerFactory.getLogger(TopologyBuilder.class); - - private final ZoneId zone = ZoneId.of("Europe/Paris"); // TODO as parameter - - + // Streams Variables private final String inputTopic; + private final String feedbackTopic; private final String outputTopic; + private final String configurationTopic; + private final Duration emitPeriod; + private final Duration gracePeriod; + + // Serdes private final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory; - private final Duration aggregtionDuration; - private final Duration aggregationAdvance; private final StreamsBuilder builder = new StreamsBuilder(); + private final RecordAggregator recordAggregator = new RecordAggregator(); /** * Create a new {@link TopologyBuilder} using the given topics. + * + * @param inputTopic The topic where to read sensor measurements from. + * @param configurationTopic The topic where the hierarchy of the sensors is published. + * @param feedbackTopic The topic where aggregation results are written to for feedback. + * @param outputTopic The topic where to publish aggregation results. + * @param emitPeriod The Duration results are emitted with. + * @param gracePeriod The Duration for how long late arriving records are considered. 
+ * @param srAvroSerdeFactory Factory for creating avro SERDEs + * */ public TopologyBuilder(final String inputTopic, final String outputTopic, - final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory, - final Duration aggregtionDuration, final Duration aggregationAdvance) { + final String feedbackTopic, final String configurationTopic, + final Duration emitPeriod, final Duration gracePeriod, + final SchemaRegistryAvroSerdeFactory srAvroSerdeFactory) { this.inputTopic = inputTopic; this.outputTopic = outputTopic; + this.feedbackTopic = feedbackTopic; + this.configurationTopic = configurationTopic; + this.emitPeriod = emitPeriod; + this.gracePeriod = gracePeriod; + this.srAvroSerdeFactory = srAvroSerdeFactory; - this.aggregtionDuration = aggregtionDuration; - this.aggregationAdvance = aggregationAdvance; } /** - * Build the {@link Topology} for the History microservice. + * Build the {@link Topology} for the Aggregation microservice. */ public Topology build(final Properties properties) { - final StatsKeyFactory<HourOfDayKey> keyFactory = new HourOfDayKeyFactory(); - final Serde<HourOfDayKey> keySerde = HourOfDayKeySerde.create(); - - this.builder - .stream(this.inputTopic, - Consumed.with(Serdes.String(), - this.srAvroSerdeFactory.<ActivePowerRecord>forValues())) - .selectKey((key, value) -> { - final Instant instant = Instant.ofEpochMilli(value.getTimestamp()); - final LocalDateTime dateTime = LocalDateTime.ofInstant(instant, this.zone); - return keyFactory.createKey(value.getIdentifier(), dateTime); - }) - .groupByKey( - Grouped.with(keySerde, this.srAvroSerdeFactory.forValues())) - .windowedBy(TimeWindows.of(this.aggregtionDuration).advanceBy(this.aggregationAdvance)) + // 1. Build Parent-Sensor Table + final KTable<String, Set<String>> parentSensorTable = this.buildParentSensorTable(); + + // 2. Build Input Table + final KTable<String, ActivePowerRecord> inputTable = this.buildInputTable(); + + // 3. 
Build Last Value Table from Input and Parent-Sensor Table + final KTable<Windowed<SensorParentKey>, ActivePowerRecord> lastValueTable = + this.buildLastValueTable(parentSensorTable, inputTable); + + // 4. Build Aggregations Stream + final KTable<Windowed<String>, AggregatedActivePowerRecord> aggregations = + this.buildAggregationStream(lastValueTable); + + // 6. Expose Feedback Stream + this.exposeFeedbackStream(aggregations); + + // 5. Expose Aggregations Stream + this.exposeOutputStream(aggregations); + + return this.builder.build(properties); + } + + private KTable<String, ActivePowerRecord> buildInputTable() { + final KStream<String, ActivePowerRecord> values = this.builder + .stream(this.inputTopic, Consumed.with( + Serdes.String(), + this.srAvroSerdeFactory.forValues())); + + final KStream<String, ActivePowerRecord> aggregationsInput = this.builder + .stream(this.feedbackTopic, Consumed.with( + Serdes.String(), + this.srAvroSerdeFactory.<AggregatedActivePowerRecord>forValues())) + .mapValues(r -> new ActivePowerRecord(r.getIdentifier(), r.getTimestamp(), r.getSumInW())); + + final KTable<String, ActivePowerRecord> inputTable = values + .merge(aggregationsInput) + .groupByKey(Grouped.with( + Serdes.String(), + this.srAvroSerdeFactory.forValues())) + .reduce((aggr, value) -> value, Materialized.with( + Serdes.String(), + this.srAvroSerdeFactory.forValues())); + return inputTable; + } + + private KTable<String, Set<String>> buildParentSensorTable() { + final KStream<Event, String> configurationStream = this.builder + .stream(this.configurationTopic, Consumed.with(EventSerde.serde(), Serdes.String())) + .filter((key, value) -> key == Event.SENSOR_REGISTRY_CHANGED + || key == Event.SENSOR_REGISTRY_STATUS); + + return configurationStream + .mapValues(data -> SensorRegistry.fromJson(data)) + .flatTransform(new ChildParentsTransformerSupplier()) + .groupByKey(Grouped.with(Serdes.String(), OptionalParentsSerde.serde())) .aggregate( - () -> Stats.of(), - (k, record, 
stats) -> StatsFactory.accumulate(stats, record.getValueInW()), - Materialized.with(keySerde, - GenericSerde.from(Stats::toByteArray, Stats::fromByteArray))) + () -> Set.<String>of(), + (key, newValue, oldValue) -> newValue.orElse(null), + Materialized.with(Serdes.String(), ParentsSerde.serde())); + } + + private KTable<Windowed<SensorParentKey>, ActivePowerRecord> buildLastValueTable( + final KTable<String, Set<String>> parentSensorTable, + final KTable<String, ActivePowerRecord> inputTable) { + + return inputTable + .join(parentSensorTable, (record, parents) -> new JointRecordParents(parents, record)) .toStream() - .map((key, stats) -> KeyValue.pair( - keyFactory.getSensorId(key.key()), - stats.toString())) - // TODO - // statsRecordFactory.create(key, value))) - // .peek((k, v) -> LOGGER.info("{}: {}", k, v)) // TODO Temp logging - .to( - this.outputTopic, - Produced.with( - Serdes.String(), - Serdes.String())); - // this.serdes.avroValues())); + .flatTransform(new JointFlatTransformerSupplier()) + .groupByKey(Grouped.with( + SensorParentKeySerde.serde(), + this.srAvroSerdeFactory.forValues())) + .windowedBy(TimeWindows.of(this.emitPeriod).grace(this.gracePeriod)) + .reduce( + // TODO Configurable window aggregation function + (oldVal, newVal) -> newVal.getTimestamp() >= oldVal.getTimestamp() ? 
newVal : oldVal, + Materialized.with( + SensorParentKeySerde.serde(), + this.srAvroSerdeFactory.forValues())); + } - return this.builder.build(properties); + private KTable<Windowed<String>, AggregatedActivePowerRecord> buildAggregationStream( + final KTable<Windowed<SensorParentKey>, ActivePowerRecord> lastValueTable) { + return lastValueTable + .groupBy( + (k, v) -> KeyValue.pair(new Windowed<>(k.key().getParent(), k.window()), v), + Grouped.with( + new WindowedSerdes.TimeWindowedSerde<>( + Serdes.String(), + this.emitPeriod.toMillis()), + this.srAvroSerdeFactory.forValues())) + .aggregate( + () -> null, + this.recordAggregator::add, + this.recordAggregator::substract, + Materialized.with( + new WindowedSerdes.TimeWindowedSerde<>( + Serdes.String(), + this.emitPeriod.toMillis()), + this.srAvroSerdeFactory.forValues())) + // TODO timestamp -1 indicates that this record is emitted by an substract event + .filter((k, record) -> record.getTimestamp() != -1); + } + + private void exposeFeedbackStream( + final KTable<Windowed<String>, AggregatedActivePowerRecord> aggregations) { + + aggregations + .toStream() + .filter((k, record) -> record != null) + .selectKey((k, v) -> k.key()) + .to(this.feedbackTopic, Produced.with( + Serdes.String(), + this.srAvroSerdeFactory.forValues())); + } + + private void exposeOutputStream( + final KTable<Windowed<String>, AggregatedActivePowerRecord> aggregations) { + + aggregations + // .suppress(Suppressed.untilWindowCloses(BufferConfig.unbounded())) + .suppress(Suppressed.untilTimeLimit(this.emitPeriod, BufferConfig.unbounded())) + .toStream() + .filter((k, record) -> record != null) + .selectKey((k, v) -> k.key()) + .to(this.outputTopic, Produced.with( + Serdes.String(), + this.srAvroSerdeFactory.forValues())); } } diff --git a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java index 
67c652967194f59db560b8ad6fd86410725b3c9c..9f1af3ba066bcdfef7f8e9073947d570a1327515 100644 --- a/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java +++ b/benchmarks/uc4-application/src/main/java/theodolite/uc4/streamprocessing/Uc4KafkaStreamsBuilder.java @@ -11,44 +11,61 @@ import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory; /** * Builder for the Kafka Streams configuration. */ -public class Uc4KafkaStreamsBuilder extends KafkaStreamsBuilder { +public class Uc4KafkaStreamsBuilder extends KafkaStreamsBuilder { // NOPMD builder method + private static final Duration EMIT_PERIOD_DEFAULT = Duration.ofSeconds(1); + private static final Duration GRACE_PERIOD_DEFAULT = Duration.ZERO; + + private String feedbackTopic; // NOPMD private String outputTopic; // NOPMD - private Duration aggregtionDuration; // NOPMD - private Duration aggregationAdvance; // NOPMD + private String configurationTopic; // NOPMD + private Duration emitPeriod; // NOPMD + private Duration gracePeriod; // NOPMD public Uc4KafkaStreamsBuilder(final Configuration config) { super(config); } + public Uc4KafkaStreamsBuilder feedbackTopic(final String feedbackTopic) { + this.feedbackTopic = feedbackTopic; + return this; + } + public Uc4KafkaStreamsBuilder outputTopic(final String outputTopic) { this.outputTopic = outputTopic; return this; } - public Uc4KafkaStreamsBuilder aggregtionDuration(final Duration aggregtionDuration) { - this.aggregtionDuration = aggregtionDuration; + public Uc4KafkaStreamsBuilder configurationTopic(final String configurationTopic) { + this.configurationTopic = configurationTopic; + return this; + } + + public Uc4KafkaStreamsBuilder emitPeriod(final Duration emitPeriod) { + this.emitPeriod = Objects.requireNonNull(emitPeriod); return this; } - public Uc4KafkaStreamsBuilder aggregationAdvance(final Duration aggregationAdvance) { - this.aggregationAdvance = aggregationAdvance; + public Uc4KafkaStreamsBuilder 
gracePeriod(final Duration gracePeriod) { + this.gracePeriod = Objects.requireNonNull(gracePeriod); return this; } @Override protected Topology buildTopology(final Properties properties) { Objects.requireNonNull(this.inputTopic, "Input topic has not been set."); + Objects.requireNonNull(this.feedbackTopic, "Feedback topic has not been set."); Objects.requireNonNull(this.outputTopic, "Output topic has not been set."); - Objects.requireNonNull(this.aggregtionDuration, "Aggregation duration has not been set."); - Objects.requireNonNull(this.aggregationAdvance, "Aggregation advance period has not been set."); + Objects.requireNonNull(this.configurationTopic, "Configuration topic has not been set."); final TopologyBuilder topologyBuilder = new TopologyBuilder( this.inputTopic, this.outputTopic, - new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl), - this.aggregtionDuration, - this.aggregationAdvance); + this.feedbackTopic, + this.configurationTopic, + this.emitPeriod == null ? EMIT_PERIOD_DEFAULT : this.emitPeriod, + this.gracePeriod == null ? 
GRACE_PERIOD_DEFAULT : this.gracePeriod, + new SchemaRegistryAvroSerdeFactory(this.schemaRegistryUrl)); return topologyBuilder.build(properties); } diff --git a/benchmarks/uc4-application/src/main/resources/META-INF/application.properties b/benchmarks/uc4-application/src/main/resources/META-INF/application.properties index b46681533e63bf86a51439778a46940da348559d..ce06091076e6ff7f9ede355c7f54c12b3d872119 100644 --- a/benchmarks/uc4-application/src/main/resources/META-INF/application.properties +++ b/benchmarks/uc4-application/src/main/resources/META-INF/application.properties @@ -3,8 +3,11 @@ application.version=0.0.1 kafka.bootstrap.servers=localhost:9092 kafka.input.topic=input +kafka.configuration.topic=configuration +kafka.feedback.topic=aggregation-feedback kafka.output.topic=output -aggregation.duration.days=30 -aggregation.advance.days=1 schema.registry.url=http://localhost:8091 + +emit.period.ms=5000 +grace.period.ms=0 \ No newline at end of file diff --git a/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java b/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/OptionalParentsSerdeTest.java similarity index 95% rename from benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java rename to benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/OptionalParentsSerdeTest.java index 54e8c460e642d53bb013ef6888570d6fc36ff614..600fc0b15ccc3ac3d902565fba1d073e37d98d0f 100644 --- a/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/OptionalParentsSerdeTest.java +++ b/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/OptionalParentsSerdeTest.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import java.util.Optional; import java.util.Set; diff --git 
a/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java b/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/ParentsSerdeTest.java similarity index 91% rename from benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java rename to benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/ParentsSerdeTest.java index f12604d6a19ca36e9c151210005c910b37908307..994593e27914af2ad56693e4b08b8143b27000b7 100644 --- a/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/ParentsSerdeTest.java +++ b/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/ParentsSerdeTest.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import java.util.Set; import org.junit.Test; diff --git a/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java b/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SensorParentKeySerdeTest.java similarity index 92% rename from benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java rename to benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SensorParentKeySerdeTest.java index 7ca99bcb79baeb5f95a8270b99a559f2f108867e..34f87fa98ca7de7d6ca24a49a73729e5ecc2e74b 100644 --- a/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SensorParentKeySerdeTest.java +++ b/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SensorParentKeySerdeTest.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import org.junit.Test; diff --git a/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java b/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SerdeTester.java similarity index 94% rename 
from benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java rename to benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SerdeTester.java index 8e9f5a3608e5bae032c6e79b7cd059a0776987c2..b5d5f942dac068379fe90a7462545adb7a11e7df 100644 --- a/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTester.java +++ b/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SerdeTester.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import static org.junit.Assert.assertEquals; import java.util.function.Function; diff --git a/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java b/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SerdeTesterFactory.java similarity index 94% rename from benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java rename to benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SerdeTesterFactory.java index 5cdbfc60574bfc924423516f80ec61850853bcff..e8083ed778c450ef6717ca7b9c73daa3d96a7af3 100644 --- a/benchmarks/uc2-application/src/test/java/theodolite/uc2/streamprocessing/SerdeTesterFactory.java +++ b/benchmarks/uc4-application/src/test/java/theodolite/uc4/streamprocessing/SerdeTesterFactory.java @@ -1,4 +1,4 @@ -package theodolite.uc2.streamprocessing; +package theodolite.uc4.streamprocessing; import org.apache.kafka.common.serialization.Serde; diff --git a/benchmarks/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs b/benchmarks/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs index 4e04e2891754324a6e1bf55348b6a38f592bb301..fa98ca63d77bdee891150bd6713f70197a75cefc 100644 --- a/benchmarks/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs +++ b/benchmarks/uc4-workload-generator/.settings/org.eclipse.jdt.ui.prefs @@ -32,7 +32,7 @@ 
cleanup.qualify_static_member_accesses_with_declaring_class=true cleanup.qualify_static_method_accesses_with_declaring_class=false cleanup.remove_private_constructors=true cleanup.remove_redundant_modifiers=false -cleanup.remove_redundant_semicolons=false +cleanup.remove_redundant_semicolons=true cleanup.remove_redundant_type_arguments=true cleanup.remove_trailing_whitespaces=true cleanup.remove_trailing_whitespaces_all=true diff --git a/benchmarks/uc4-workload-generator/Dockerfile b/benchmarks/uc4-workload-generator/Dockerfile index 8f077637acb82e23ee69a8df749baeb72b3098af..f39923e59d3079d3b163ffc5d2e4906599de026d 100644 --- a/benchmarks/uc4-workload-generator/Dockerfile +++ b/benchmarks/uc4-workload-generator/Dockerfile @@ -2,5 +2,5 @@ FROM openjdk:11-slim ADD build/distributions/uc4-workload-generator.tar / -CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \ +CMD JAVA_OPTS="$JAVA_OPTS -Dorg.slf4j.simpleLogger.defaultLogLevel=$LOG_LEVEL" \ /uc4-workload-generator/bin/uc4-workload-generator diff --git a/benchmarks/uc4-workload-generator/build.gradle b/benchmarks/uc4-workload-generator/build.gradle index 76bbce013b67bab325bac06c1986693da3028f0c..8865ec9391213f3d8c52be2366573dee09652087 100644 --- a/benchmarks/uc4-workload-generator/build.gradle +++ b/benchmarks/uc4-workload-generator/build.gradle @@ -1 +1 @@ -mainClassName = "theodolite.uc4.workloadgenerator.LoadGenerator" +mainClassName = "theodolite.uc4.workloadgenerator.LoadGenerator" diff --git a/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java b/benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/ConfigPublisher.java similarity index 98% rename from benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java rename to benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/ConfigPublisher.java index 
ad24e8e4bc8f86b7ed4d5dc2822622f8da22d6d1..ad0ee7082da9116f9ccb66a79d48b36bfb30da2e 100644 --- a/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/ConfigPublisher.java +++ b/benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/ConfigPublisher.java @@ -1,4 +1,4 @@ -package theodolite.uc2.workloadgenerator; +package theodolite.uc4.workloadgenerator; import java.util.Properties; import java.util.concurrent.ExecutionException; diff --git a/benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java b/benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java index ff551e7ef423633137d122dfed7d6e03d362e7ff..8320d16b98fa1d253064d08397d5df1bb8e17b79 100644 --- a/benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java +++ b/benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/LoadGenerator.java @@ -1,103 +1,65 @@ package theodolite.uc4.workloadgenerator; -import java.io.IOException; -import java.time.Duration; -import java.time.temporal.ChronoUnit; import java.util.Objects; -import java.util.Properties; -import org.apache.kafka.clients.producer.ProducerConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender; -import theodolite.commons.workloadgeneration.dimensions.KeySpace; -import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGenerator; -import theodolite.commons.workloadgeneration.generators.KafkaWorkloadGeneratorBuilder; -import theodolite.commons.workloadgeneration.misc.ZooKeeper; -import titan.ccp.model.records.ActivePowerRecord; +import theodolite.commons.workloadgeneration.KeySpace; +import titan.ccp.configuration.events.Event; +import titan.ccp.model.sensorregistry.SensorRegistry; /** - * The {@code LoadGenerator} creates a load in Kafka. 
+ * Load generator for Theodolite use case UC4. */ public final class LoadGenerator { - private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class); + private static final int SLEEP_PERIOD = 30_000; - // constants - private static final long MAX_DURATION_IN_DAYS = 30L; + private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class); - // Make this a utility class, because all methods are static. - private LoadGenerator() { - throw new UnsupportedOperationException(); - } + private LoadGenerator() {} /** - * Main method. - * - * @param args CLI arguments - * @throws InterruptedException Interrupt happened - * @throws IOException happened. + * Start load generator. */ - public static void main(final String[] args) throws InterruptedException, IOException { - // uc4 - LOGGER.info("Start workload generator for use case UC4."); - - // get environment variables - final String zooKeeperHost = Objects.requireNonNullElse(System.getenv("ZK_HOST"), "localhost"); - final int zooKeeperPort = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("ZK_PORT"), "2181")); - final int numSensors = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("NUM_SENSORS"), "10")); - final int periodMs = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("PERIOD_MS"), "1000")); - final double value = - Double.parseDouble(Objects.requireNonNullElse(System.getenv("VALUE"), "10")); - final int threads = Integer.parseInt(Objects.requireNonNullElse(System.getenv("THREADS"), "1")); - final String kafkaBootstrapServers = - Objects.requireNonNullElse(System.getenv("KAFKA_BOOTSTRAP_SERVERS"), - "localhost:9092"); - final String schemaRegistryUrl = - Objects.requireNonNullElse(System.getenv("SCHEMA_REGISTRY_URL"), "http://localhost:8091"); - final String kafkaInputTopic = - Objects.requireNonNullElse(System.getenv("KAFKA_INPUT_TOPIC"), "input"); - final String kafkaBatchSize = System.getenv("KAFKA_BATCH_SIZE"); - final String kafkaLingerMs 
= System.getenv("KAFKA_LINGER_MS"); - final String kafkaBufferMemory = System.getenv("KAFKA_BUFFER_MEMORY"); - final int instances = - Integer.parseInt(Objects.requireNonNullElse(System.getenv("INSTANCES"), "1")); - - // create kafka record sender - final Properties kafkaProperties = new Properties(); - // kafkaProperties.put("acks", this.acknowledges); - kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, (k, v) -> kafkaBatchSize); - kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, (k, v) -> kafkaLingerMs); - kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, (k, v) -> kafkaBufferMemory); + public static void main(final String[] args) { + final boolean sendRegistry = Boolean.parseBoolean(Objects.requireNonNullElse( + System.getenv("SEND_REGISTRY"), + "true")); + final String kafkaBootstrapServers = Objects.requireNonNullElse( + System.getenv("KAFKA_BOOTSTRAP_SERVERS"), + "localhost:9092"); + final int numSensors = Integer.parseInt(Objects.requireNonNullElse( + System.getenv("NUM_SENSORS"), + "1")); + final int numNestedGroups = Integer.parseInt(Objects.requireNonNullElse( + System.getenv("NUM_NESTED_GROUPS"), + "1")); - final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = - new KafkaRecordSender.Builder<ActivePowerRecord>( - kafkaBootstrapServers, - kafkaInputTopic, - schemaRegistryUrl) - .keyAccessor(r -> r.getIdentifier()) - .timestampAccessor(r -> r.getTimestamp()) - .defaultProperties(kafkaProperties) - .build(); + // Build sensor hierarchy + final SensorRegistry sensorRegistry = + new SensorRegistryBuilder(numNestedGroups, numSensors).build(); - // create workload generator - final KafkaWorkloadGenerator<ActivePowerRecord> workloadGenerator = - KafkaWorkloadGeneratorBuilder.<ActivePowerRecord>builder() - .instances(instances) - .keySpace(new KeySpace("s_", numSensors)) - .threads(threads) - .period(Duration.of(periodMs, ChronoUnit.MILLIS)) - .duration(Duration.of(MAX_DURATION_IN_DAYS, ChronoUnit.DAYS)) - .generatorFunction( - 
sensor -> new ActivePowerRecord(sensor, System.currentTimeMillis(), value)) - .zooKeeper(new ZooKeeper(zooKeeperHost, zooKeeperPort)) - .kafkaRecordSender(kafkaRecordSender) - .build(); + LOGGER.info("Start workload generator for use case UC4"); + theodolite.commons.workloadgeneration.LoadGenerator.fromEnvironment() + .withKeySpace(new KeySpace("s_", sensorRegistry.getMachineSensors().size())) + .withBeforeAction(() -> { + if (sendRegistry) { + final ConfigPublisher configPublisher = + new ConfigPublisher(kafkaBootstrapServers, "configuration"); + configPublisher.publish(Event.SENSOR_REGISTRY_CHANGED, sensorRegistry.toJson()); + configPublisher.close(); + LOGGER.info("Configuration sent."); - // start - workloadGenerator.start(); + LOGGER.info("Now wait 30 seconds..."); + try { + Thread.sleep(SLEEP_PERIOD); + } catch (final InterruptedException e) { + LOGGER.error(e.getMessage(), e); + } + LOGGER.info("...and start generating load."); + } + }) + .run(); } } diff --git a/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java b/benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/SensorRegistryBuilder.java similarity index 97% rename from benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java rename to benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/SensorRegistryBuilder.java index 7c34ac89471386f4ddd508a304f2197602beab27..60303056a01466b908b73e51377427f5d8347441 100644 --- a/benchmarks/uc2-workload-generator/src/main/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilder.java +++ b/benchmarks/uc4-workload-generator/src/main/java/theodolite/uc4/workloadgenerator/SensorRegistryBuilder.java @@ -1,4 +1,4 @@ -package theodolite.uc2.workloadgenerator; +package theodolite.uc4.workloadgenerator; import titan.ccp.model.sensorregistry.MutableAggregatedSensor; import 
titan.ccp.model.sensorregistry.MutableSensorRegistry; diff --git a/benchmarks/uc2-workload-generator/src/main/resources/META-INF/application.properties b/benchmarks/uc4-workload-generator/src/main/resources/META-INF/application.properties similarity index 100% rename from benchmarks/uc2-workload-generator/src/main/resources/META-INF/application.properties rename to benchmarks/uc4-workload-generator/src/main/resources/META-INF/application.properties diff --git a/benchmarks/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java b/benchmarks/uc4-workload-generator/src/test/java/theodolite/uc4/workloadgenerator/SensorRegistryBuilderTest.java similarity index 97% rename from benchmarks/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java rename to benchmarks/uc4-workload-generator/src/test/java/theodolite/uc4/workloadgenerator/SensorRegistryBuilderTest.java index 17b208edac4acafa92b7a75e053e2fe97a9afdb6..424c84ec96cdd90077fb7934686cd021b040e732 100644 --- a/benchmarks/uc2-workload-generator/src/test/java/theodolite/uc2/workloadgenerator/SensorRegistryBuilderTest.java +++ b/benchmarks/uc4-workload-generator/src/test/java/theodolite/uc4/workloadgenerator/SensorRegistryBuilderTest.java @@ -1,4 +1,4 @@ -package theodolite.uc2.workloadgenerator; +package theodolite.uc4.workloadgenerator; import java.util.Collection; diff --git a/benchmarks/workload-generator-commons/build.gradle b/benchmarks/workload-generator-commons/build.gradle index eef987cd444c3b6c3d8a532c8d192e94311176db..98d820b480ba0b357b74f82ebce5a647ee392461 100644 --- a/benchmarks/workload-generator-commons/build.gradle +++ b/benchmarks/workload-generator-commons/build.gradle @@ -1,3 +1,5 @@ dependencies { - implementation 'org.apache.curator:curator-recipes:4.3.0' + implementation 'com.google.guava:guava:30.1-jre' + implementation 'com.hazelcast:hazelcast:4.1.1' + implementation 'com.hazelcast:hazelcast-kubernetes:2.2.1' 
} \ No newline at end of file diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/BeforeAction.java similarity index 57% rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/BeforeAction.java index 7914a4985b6df40f7146c1fd681d1fba063f8b98..56af95d70f762095a6fe090457b7d4b473a43b1a 100644 --- a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/BeforeAction.java +++ b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/BeforeAction.java @@ -1,4 +1,4 @@ -package theodolite.commons.workloadgeneration.functions; +package theodolite.commons.workloadgeneration; /** * Describes the before action which is executed before every sub experiment. @@ -8,4 +8,9 @@ public interface BeforeAction { public void run(); + public static BeforeAction doNothing() { + return () -> { + }; + } + } diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/ClusterConfig.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/ClusterConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..9d84dc67461f98fabdee4c8e0784ad7394d7f108 --- /dev/null +++ b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/ClusterConfig.java @@ -0,0 +1,76 @@ +package theodolite.commons.workloadgeneration; + +/** + * Configuration of a load generator cluster. 
+ */ +public final class ClusterConfig { + + private static final int PORT_DEFAULT = 5701; + private static final String CLUSTER_NAME_PREFIX_DEFAULT = "theodolite-load-generation"; + + private final String bootstrapServer; + private final String kubernetesDnsName; + private int port = PORT_DEFAULT; + private boolean portAutoIncrement = true; + private String clusterNamePrefix = CLUSTER_NAME_PREFIX_DEFAULT; + + /** + * Create a new {@link ClusterConfig} with the given parameter values. + */ + private ClusterConfig(final String bootstrapServer, final String kubernetesDnsName) { + this.bootstrapServer = bootstrapServer; + this.kubernetesDnsName = kubernetesDnsName; + } + + public boolean hasBootstrapServer() { + return this.bootstrapServer != null; + } + + public String getBootstrapServer() { + return this.bootstrapServer; + } + + public boolean hasKubernetesDnsName() { + return this.kubernetesDnsName != null; + } + + public String getKubernetesDnsName() { + return this.kubernetesDnsName; + } + + public int getPort() { + return this.port; + } + + public boolean isPortAutoIncrement() { + return this.portAutoIncrement; + } + + public ClusterConfig setPortAutoIncrement(final boolean portAutoIncrement) { // NOPMD + this.portAutoIncrement = portAutoIncrement; + return this; + } + + public ClusterConfig setPort(final int port) { // NOPMD + this.port = port; + return this; + } + + public String getClusterNamePrefix() { + return this.clusterNamePrefix; + } + + public ClusterConfig setClusterNamePrefix(final String clusterNamePrefix) { // NOPMD + this.clusterNamePrefix = clusterNamePrefix; + return this; + } + + public static ClusterConfig fromBootstrapServer(final String bootstrapServer) { + return new ClusterConfig(bootstrapServer, null); + } + + public static ClusterConfig fromKubernetesDnsName(final String kubernetesDnsName) { + return new ClusterConfig(null, kubernetesDnsName); + } + +} diff --git 
a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/ConfigurationKeys.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/ConfigurationKeys.java new file mode 100644 index 0000000000000000000000000000000000000000..45ac1d5bb9c21a1b6303de2f248d08b69c02fc28 --- /dev/null +++ b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/ConfigurationKeys.java @@ -0,0 +1,40 @@ +package theodolite.commons.workloadgeneration; + +/** + * Keys to access configuration parameters. + */ +public final class ConfigurationKeys { + + public static final String BOOTSTRAP_SERVER = "BOOTSTRAP_SERVER"; + + public static final String KUBERNETES_DNS_NAME = "KUBERNETES_DNS_NAME"; + + public static final String PORT = "PORT"; + + public static final String PORT_AUTO_INCREMENT = "PORT_AUTO_INCREMENT"; + + public static final String CLUSTER_NAME_PREFIX = "CLUSTER_NAME_PREFIX"; + + public static final String NUM_SENSORS = "NUM_SENSORS"; + + public static final String PERIOD_MS = "PERIOD_MS"; + + public static final String VALUE = "VALUE"; + + public static final String THREADS = "THREADS"; + + public static final String KAFKA_BOOTSTRAP_SERVERS = "KAFKA_BOOTSTRAP_SERVERS"; + + public static final String SCHEMA_REGISTRY_URL = "SCHEMA_REGISTRY_URL"; + + public static final String KAFKA_INPUT_TOPIC = "KAFKA_INPUT_TOPIC"; + + public static final String KAFKA_BATCH_SIZE = "KAFKA_BATCH_SIZE"; + + public static final String KAFKA_LINGER_MS = "KAFKA_LINGER_MS"; + + public static final String KAFKA_BUFFER_MEMORY = "KAFKA_BUFFER_MEMORY"; + + private ConfigurationKeys() {} + +} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/HazelcastRunner.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/HazelcastRunner.java new file mode 100644 index 
0000000000000000000000000000000000000000..c010492950c5caace9ff85baefee1af4e46d25bb --- /dev/null +++ b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/HazelcastRunner.java @@ -0,0 +1,107 @@ +package theodolite.commons.workloadgeneration; + +import com.hazelcast.cluster.Member; +import com.hazelcast.cluster.MembershipEvent; +import com.hazelcast.cluster.MembershipListener; +import com.hazelcast.config.Config; +import com.hazelcast.config.JoinConfig; +import com.hazelcast.core.Hazelcast; +import com.hazelcast.core.HazelcastInstance; +import java.util.Set; +import java.util.concurrent.CompletableFuture; + +/** + * A Theodolite load generator runner that establishes a cluster using Hazelcast. + */ +public class HazelcastRunner { + + private static final String HZ_KUBERNETES_SERVICE_DNS_KEY = "service-dns"; + private final HazelcastInstance hzInstance; + private volatile HazelcastRunnerStateInstance runnerState; + private final CompletableFuture<Void> stopAction = new CompletableFuture<>(); + private final LoadGeneratorConfig loadConfig; + private final WorkloadDefinition totalLoadDefinition; + + /** + * Create a new {@link HazelcastRunner} from the given configuration. + */ + public HazelcastRunner( + final ClusterConfig clusterConfig, + final LoadGeneratorConfig loadConfig, + final WorkloadDefinition totalLoadDefinition) { + this.loadConfig = loadConfig; + this.totalLoadDefinition = totalLoadDefinition; + this.hzInstance = buildhazelcastInstance(clusterConfig, totalLoadDefinition.toString()); + this.hzInstance.getCluster().addMembershipListener(new RunnerMembershipListener()); + } + + /** + * Start the workload generation and blocks until the workload generation is stopped again. 
+ */ + public void runBlocking() { + while (!this.stopAction.isDone()) { + synchronized (this) { + final Set<Member> members = this.hzInstance.getCluster().getMembers(); + this.runnerState = new HazelcastRunnerStateInstance( + this.loadConfig, + this.totalLoadDefinition, + this.hzInstance, members); + } + this.runnerState.runBlocking(); + } + } + + public void restart() { + this.stopRunnerState(); + } + + public void stop() { + this.stopAction.complete(null); + this.stopRunnerState(); + } + + private void stopRunnerState() { + synchronized (this) { + if (this.runnerState != null) { + this.runnerState.stopAsync(); + } + } + } + + private class RunnerMembershipListener implements MembershipListener { + + @Override + public void memberAdded(final MembershipEvent membershipEvent) { + HazelcastRunner.this.restart(); + } + + @Override + public void memberRemoved(final MembershipEvent membershipEvent) { + HazelcastRunner.this.restart(); + } + + } + + private static HazelcastInstance buildhazelcastInstance( + final ClusterConfig cluster, + final String clusterName) { + final Config config = new Config() + .setClusterName(cluster.getClusterNamePrefix() + '_' + clusterName); + + final JoinConfig joinConfig = config.getNetworkConfig() + .setPort(cluster.getPort()) + .setPortAutoIncrement(cluster.isPortAutoIncrement()) + .getJoin(); + joinConfig.getMulticastConfig().setEnabled(false); + if (cluster.hasBootstrapServer()) { + joinConfig.getTcpIpConfig().addMember(cluster.getBootstrapServer()); + } else if (cluster.hasKubernetesDnsName()) { + joinConfig.getKubernetesConfig() + .setEnabled(true) + .setProperty(HZ_KUBERNETES_SERVICE_DNS_KEY, cluster.getKubernetesDnsName()); + } + + return Hazelcast.newHazelcastInstance(config); + } + +} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/HazelcastRunnerStateInstance.java 
b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/HazelcastRunnerStateInstance.java new file mode 100644 index 0000000000000000000000000000000000000000..d8fd7de421b88749a2077f81329213ff754e1608 --- /dev/null +++ b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/HazelcastRunnerStateInstance.java @@ -0,0 +1,196 @@ +package theodolite.commons.workloadgeneration; + +import com.google.common.collect.Streams; +import com.hazelcast.cluster.Member; +import com.hazelcast.core.HazelcastInstance; +import com.hazelcast.cp.IAtomicReference; +import com.hazelcast.cp.lock.FencedLock; +import java.time.Duration; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * An instance of a Hazelcast runner state, that is a load generator cluster with a given set of + * members. + */ +public class HazelcastRunnerStateInstance { + + private static final Logger LOGGER = LoggerFactory.getLogger(HazelcastRunnerStateInstance.class); + + private static final Duration BEFORE_ACTION_WAIT_DURATION = Duration.ofMillis(500); + private static final Duration TASK_ASSIGNMENT_WAIT_DURATION = Duration.ofMillis(500); + + private final CompletableFuture<Void> stopAction = new CompletableFuture<>(); + private LoadGeneratorExecution loadGeneratorExecution; + + private final LoadGeneratorConfig loadGeneratorConfig; + private final WorkloadDefinition totalLoadDefinition; + private final HazelcastInstance hzInstance; + private final Set<Member> members; + + /** + * Create a new {@link HazelcastRunnerStateInstance}. 
+ */ + public HazelcastRunnerStateInstance( + final LoadGeneratorConfig loadGeneratorConfig, + final WorkloadDefinition totalLoadDefinition, + final HazelcastInstance hzInstance, + final Set<Member> members) { + this.hzInstance = hzInstance; + this.members = members; + this.loadGeneratorConfig = loadGeneratorConfig; + this.totalLoadDefinition = totalLoadDefinition; + + LOGGER.info("Created new Hazelcast runner instance for member set '{}'", this.members); + } + + /** + * Start and block load generation for the configured member set. + */ + public void runBlocking() { + if (!this.stopAction.isDone()) { + this.tryPerformBeforeAction(); + this.tryCreateTaskAssignment(); + this.startLoadGeneration(); + } + this.stopAction.join(); + this.stopLoadGeneration(); + } + + public void stopAsync() { + this.stopAction.complete(null); + } + + private void tryPerformBeforeAction() { + final FencedLock lock = this.getBeforeActionPerformerLock(); + final IAtomicReference<Boolean> isActionPerformed = this.getIsBeforeActionPerformed(); // NOPMD + isActionPerformed.alter(p -> p != null && p); // p -> p == null ? 
false : p + boolean triedPerformingBeforeAction = false; + while (!isActionPerformed.get()) { + // Try performing the before action + triedPerformingBeforeAction = true; + if (lock.tryLock()) { + try { + if (!isActionPerformed.get()) { + LOGGER.info("This instance is elected to perform the before action."); + this.loadGeneratorConfig.getBeforeAction().run(); + LOGGER.info("Before action performed."); + isActionPerformed.set(true); + } + } finally { + lock.unlock(); + } + } else { + LOGGER.info("Wait for before action to be performed."); + delay(BEFORE_ACTION_WAIT_DURATION); + } + } + if (!triedPerformingBeforeAction) { + LOGGER.info("Before action has already been performed."); + } + } + + + + private void tryCreateTaskAssignment() { + final Map<UUID, WorkloadDefinition> taskAssignment = this.getTaskAssignment(); + final FencedLock lock = this.getTaskAssignmentLock(); + + boolean triedCreatingTaskAssignment = false; + while (taskAssignment.size() != this.members.size()) { + // Try creating task assignment + triedCreatingTaskAssignment = true; + if (lock.tryLock()) { + try { + if (taskAssignment.size() != this.members.size()) { + LOGGER.info("This instance is elected to create the task assignment."); + + final Set<WorkloadDefinition> subLoadDefinitions = + this.totalLoadDefinition.divide(this.members.size()); + Streams + .zip( + subLoadDefinitions.stream(), + this.members.stream(), + (loadDef, member) -> new LoadDefPerMember(loadDef, member)) + .forEach(l -> taskAssignment.put(l.member.getUuid(), l.loadDefinition)); + + LOGGER.info("Task assignment created."); + } + } finally { + lock.unlock(); + } + } else { + LOGGER.info("Wait for task assignment to be available."); + delay(TASK_ASSIGNMENT_WAIT_DURATION); + } + } + if (!triedCreatingTaskAssignment) { + LOGGER.info("Task assignment is already available."); + } + } + + private void startLoadGeneration() { + if (this.loadGeneratorExecution != null) { + throw new IllegalStateException("Load generation has already 
started before."); + } + LOGGER.info("Start running load generation and pick assigned task."); + + final Member member = this.hzInstance.getCluster().getLocalMember(); + final WorkloadDefinition workload = this.getTaskAssignment().get(member.getUuid()); + + LOGGER.info("Run load generation for assigned task: {}", workload); + this.loadGeneratorExecution = this.loadGeneratorConfig.buildLoadGeneratorExecution(workload); + this.loadGeneratorExecution.start(); + } + + private void stopLoadGeneration() { + this.loadGeneratorExecution.stop(); + } + + private IAtomicReference<Boolean> getIsBeforeActionPerformed() { + return this.hzInstance.getCPSubsystem().getAtomicReference("isBeforeActionPerformed"); + } + + private FencedLock getBeforeActionPerformerLock() { + return this.hzInstance.getCPSubsystem().getLock("beforeActionPerformer"); + } + + private Map<UUID, WorkloadDefinition> getTaskAssignment() { + return this.hzInstance.getReplicatedMap(this.getTaskAssignmentName()); + } + + private FencedLock getTaskAssignmentLock() { + return this.hzInstance.getCPSubsystem().getLock(this.getTaskAssignmentName() + "_assigner"); + } + + private String getTaskAssignmentName() { + return this.members.stream() + .map(m -> m.getUuid().toString()) + .collect(Collectors.joining("/")); + } + + private static void delay(final Duration duration) { + try { + TimeUnit.MILLISECONDS.sleep(duration.toMillis()); + } catch (final InterruptedException e) { + throw new IllegalStateException(e); + } + } + + private static final class LoadDefPerMember { + public final WorkloadDefinition loadDefinition; // NOCS used only internally + public final Member member; // NOCS used only internally + + public LoadDefPerMember(final WorkloadDefinition loadDefinition, final Member member) { + this.loadDefinition = loadDefinition; + this.member = member; + } + } + +} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java 
b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KafkaRecordSender.java similarity index 88% rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KafkaRecordSender.java index 33818b51084ce33a564d6f30cefb26b481d0a859..dd17234bf1adb1f0fcf3ff3ab134a0743b917369 100644 --- a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/kafka/KafkaRecordSender.java +++ b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KafkaRecordSender.java @@ -1,4 +1,4 @@ -package theodolite.commons.workloadgeneration.communication.kafka; +package theodolite.commons.workloadgeneration; import java.util.Properties; import java.util.function.Function; @@ -9,7 +9,6 @@ import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.common.serialization.StringSerializer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import theodolite.commons.workloadgeneration.functions.Transport; import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory; /** @@ -17,7 +16,7 @@ import titan.ccp.common.kafka.avro.SchemaRegistryAvroSerdeFactory; * * @param <T> {@link IMonitoringRecord} to send */ -public class KafkaRecordSender<T extends SpecificRecord> implements Transport<T> { +public class KafkaRecordSender<T extends SpecificRecord> implements RecordSender<T> { private static final Logger LOGGER = LoggerFactory.getLogger(KafkaRecordSender.class); @@ -47,10 +46,19 @@ public class KafkaRecordSender<T extends SpecificRecord> implements Transport<T> final SchemaRegistryAvroSerdeFactory avroSerdeFactory = new SchemaRegistryAvroSerdeFactory(builder.schemaRegistryUrl); - this.producer = new KafkaProducer<>(properties, new StringSerializer(), + this.producer = new 
KafkaProducer<>( + properties, + new StringSerializer(), avroSerdeFactory.<T>forKeys().serializer()); } + public static <T extends SpecificRecord> Builder<T> builder( + final String bootstrapServers, + final String topic, + final String schemaRegistryUrl) { + return new Builder<>(bootstrapServers, topic, schemaRegistryUrl); + } + /** * Builder class to build a new {@link KafkaRecordSender}. * @@ -72,7 +80,7 @@ public class KafkaRecordSender<T extends SpecificRecord> implements Transport<T> * @param topic The topic where to write. * @param schemaRegistryUrl URL to the schema registry for avro. */ - public Builder(final String bootstrapServers, final String topic, + private Builder(final String bootstrapServers, final String topic, final String schemaRegistryUrl) { this.bootstrapServers = bootstrapServers; this.topic = topic; @@ -116,7 +124,7 @@ public class KafkaRecordSender<T extends SpecificRecord> implements Transport<T> } @Override - public void transport(final T message) { + public void send(final T message) { this.write(message); } diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KeySpace.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KeySpace.java new file mode 100644 index 0000000000000000000000000000000000000000..51255d774427a9e00de0d4c921b884022585edab --- /dev/null +++ b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/KeySpace.java @@ -0,0 +1,72 @@ +package theodolite.commons.workloadgeneration; + +import java.io.Serializable; +import java.util.Collection; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +/** + * A set of keys, where each key consists of a prefix and a number. 
+ */ +public class KeySpace implements Serializable { + + private static final long serialVersionUID = 7343135392720315515L; // NOPMD + + private final String prefix; + private final int min; + private final int max; + + /** + * Create a new key space. All keys will have the prefix {@code prefix}. The remaining part of + * each key will be determined by a number of the interval ({@code min}, {@code max}). + * + * @param prefix the prefix to use for all keys + * @param min the lower bound (inclusive) to start counting from + * @param max the upper bound (inclusive) to count to + */ + public KeySpace(final String prefix, final int min, final int max) { + this.prefix = prefix; + this.min = min; + this.max = max; + } + + public KeySpace(final String prefix, final int numberOfKeys) { + this(prefix, 0, numberOfKeys - 1); + } + + public String getPrefix() { + return this.prefix; + } + + + public int getMin() { + return this.min; + } + + + public int getMax() { + return this.max; + } + + /** + * Get the amount of keys in this {@link KeySpace}. + */ + public int getCount() { + return this.getMax() - this.getMin() + 1; + } + + /** + * Get all keys in this {@link KeySpace}. 
+ */ + public Collection<String> getKeys() { + return IntStream.rangeClosed(this.min, this.max) + .mapToObj(id -> this.prefix + id) + .collect(Collectors.toUnmodifiableList()); + } + + @Override + public String toString() { + return this.prefix + '[' + this.min + '-' + this.max + ']'; + } + +} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGenerator.java new file mode 100644 index 0000000000000000000000000000000000000000..a9a1ce65ac32e3508299c99a38ecd21e4c9461cf --- /dev/null +++ b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGenerator.java @@ -0,0 +1,183 @@ +package theodolite.commons.workloadgeneration; + +import java.time.Duration; +import java.util.Objects; +import java.util.Properties; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A Theodolite load generator. 
+ */ +public final class LoadGenerator { + + private static final Logger LOGGER = LoggerFactory.getLogger(LoadGenerator.class); + + private static final String BOOTSTRAP_SERVER_DEFAULT = "localhost:5701"; + private static final String SENSOR_PREFIX_DEFAULT = "s_"; + private static final int NUMBER_OF_KEYS_DEFAULT = 10; + private static final int PERIOD_MS_DEFAULT = 1000; + private static final int VALUE_DEFAULT = 10; + private static final int THREADS_DEFAULT = 4; + private static final String SCHEMA_REGISTRY_URL_DEFAULT = "http://localhost:8081"; + private static final String KAFKA_TOPIC_DEFAULT = "input"; + private static final String KAFKA_BOOTSTRAP_SERVERS_DEFAULT = "localhost:19092"; // NOPMD + + private ClusterConfig clusterConfig; + private WorkloadDefinition loadDefinition; + private LoadGeneratorConfig generatorConfig; + private boolean isStarted; + + private LoadGenerator() {} + + // Add constructor for creating from environment variables + + public LoadGenerator setClusterConfig(final ClusterConfig clusterConfig) { // NOPMD + this.clusterConfig = clusterConfig; + return this; + } + + public LoadGenerator setLoadDefinition(final WorkloadDefinition loadDefinition) { // NOPMD + this.loadDefinition = loadDefinition; + return this; + } + + public LoadGenerator setGeneratorConfig(final LoadGeneratorConfig generatorConfig) { // NOPMD + this.generatorConfig = generatorConfig; + return this; + } + + public LoadGenerator withKeySpace(final KeySpace keySpace) { + this.loadDefinition = new WorkloadDefinition(keySpace, this.loadDefinition.getPeriod()); + return this; + } + + public LoadGenerator withBeforeAction(final BeforeAction beforeAction) { + this.generatorConfig.setBeforeAction(beforeAction); + return this; + } + + public LoadGenerator withThreads(final int threads) { + this.generatorConfig.setThreads(threads); + return this; + } + + /** + * Run the constructed load generator until cancellation. 
+ */ + public void run() { + Objects.requireNonNull(this.clusterConfig, "No cluster config set."); + Objects.requireNonNull(this.generatorConfig, "No generator config set."); + Objects.requireNonNull(this.loadDefinition, "No load definition set."); + if (this.isStarted) { + throw new IllegalStateException("Load generator can only be started once."); + } + this.isStarted = true; + final HazelcastRunner runner = new HazelcastRunner( + this.clusterConfig, + this.generatorConfig, + this.loadDefinition); + runner.runBlocking(); + } + + /** + * Create a basic {@link LoadGenerator} from its default values. + */ + public static LoadGenerator fromDefaults() { + return new LoadGenerator() + .setClusterConfig(ClusterConfig.fromBootstrapServer(BOOTSTRAP_SERVER_DEFAULT)) + .setLoadDefinition(new WorkloadDefinition( + new KeySpace(SENSOR_PREFIX_DEFAULT, NUMBER_OF_KEYS_DEFAULT), + Duration.ofMillis(PERIOD_MS_DEFAULT))) + .setGeneratorConfig(new LoadGeneratorConfig( + TitanMessageGeneratorFactory + .withKafkaConfig( + KAFKA_BOOTSTRAP_SERVERS_DEFAULT, + KAFKA_TOPIC_DEFAULT, + SCHEMA_REGISTRY_URL_DEFAULT) + .forConstantValue(VALUE_DEFAULT))); + } + + /** + * Create a basic {@link LoadGenerator} from environment variables. 
+ */ + public static LoadGenerator fromEnvironment() { + final String bootstrapServer = System.getenv(ConfigurationKeys.BOOTSTRAP_SERVER); + final String kubernetesDnsName = System.getenv(ConfigurationKeys.KUBERNETES_DNS_NAME); + + ClusterConfig clusterConfig; + if (bootstrapServer != null) { // NOPMD + clusterConfig = ClusterConfig.fromBootstrapServer(bootstrapServer); + LOGGER.info("Use bootstrap server '{}'.", bootstrapServer); + } else if (kubernetesDnsName != null) { // NOPMD + clusterConfig = ClusterConfig.fromKubernetesDnsName(kubernetesDnsName); + LOGGER.info("Use Kubernetes DNS name '{}'.", kubernetesDnsName); + } else { + clusterConfig = ClusterConfig.fromBootstrapServer(BOOTSTRAP_SERVER_DEFAULT); + LOGGER.info( + "Neither a bootstrap server nor a Kubernetes DNS name was provided. Use default bootstrap server '{}'.", // NOCS + BOOTSTRAP_SERVER_DEFAULT); + } + + final String port = System.getenv(ConfigurationKeys.PORT); + if (port != null) { + clusterConfig.setPort(Integer.parseInt(port)); + } + + final String portAutoIncrement = System.getenv(ConfigurationKeys.PORT_AUTO_INCREMENT); + if (portAutoIncrement != null) { + clusterConfig.setPortAutoIncrement(Boolean.parseBoolean(portAutoIncrement)); + } + + final String clusterNamePrefix = System.getenv(ConfigurationKeys.CLUSTER_NAME_PREFIX); + if (clusterNamePrefix != null) { + clusterConfig.setClusterNamePrefix(clusterNamePrefix); + } + + final int numSensors = Integer.parseInt(Objects.requireNonNullElse( + System.getenv(ConfigurationKeys.NUM_SENSORS), + Integer.toString(NUMBER_OF_KEYS_DEFAULT))); + final int periodMs = Integer.parseInt(Objects.requireNonNullElse( + System.getenv(ConfigurationKeys.PERIOD_MS), + Integer.toString(PERIOD_MS_DEFAULT))); + final double value = Double.parseDouble(Objects.requireNonNullElse( + System.getenv(ConfigurationKeys.VALUE), + Integer.toString(VALUE_DEFAULT))); + final int threads = Integer.parseInt(Objects.requireNonNullElse( + System.getenv(ConfigurationKeys.THREADS), + 
Integer.toString(THREADS_DEFAULT))); + final String kafkaBootstrapServers = Objects.requireNonNullElse( + System.getenv(ConfigurationKeys.KAFKA_BOOTSTRAP_SERVERS), + KAFKA_BOOTSTRAP_SERVERS_DEFAULT); + final String kafkaInputTopic = Objects.requireNonNullElse( + System.getenv(ConfigurationKeys.KAFKA_INPUT_TOPIC), + KAFKA_TOPIC_DEFAULT); + final String schemaRegistryUrl = Objects.requireNonNullElse( + System.getenv(ConfigurationKeys.SCHEMA_REGISTRY_URL), + SCHEMA_REGISTRY_URL_DEFAULT); + final Properties kafkaProperties = new Properties(); + kafkaProperties.compute(ProducerConfig.BATCH_SIZE_CONFIG, + (k, v) -> System.getenv(ConfigurationKeys.KAFKA_BATCH_SIZE)); + kafkaProperties.compute(ProducerConfig.LINGER_MS_CONFIG, + (k, v) -> System.getenv(ConfigurationKeys.KAFKA_LINGER_MS)); + kafkaProperties.compute(ProducerConfig.BUFFER_MEMORY_CONFIG, + (k, v) -> System.getenv(ConfigurationKeys.KAFKA_BUFFER_MEMORY)); + + return new LoadGenerator() + .setClusterConfig(clusterConfig) + .setLoadDefinition(new WorkloadDefinition( + new KeySpace(SENSOR_PREFIX_DEFAULT, numSensors), + Duration.ofMillis(periodMs))) + .setGeneratorConfig(new LoadGeneratorConfig( + TitanMessageGeneratorFactory + .withKafkaConfig( + kafkaBootstrapServers, + kafkaInputTopic, + schemaRegistryUrl, + kafkaProperties) + .forConstantValue(value))) + .withThreads(threads); + } + +} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGeneratorConfig.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGeneratorConfig.java new file mode 100644 index 0000000000000000000000000000000000000000..2e907d8e90172288099bc6a1776777c37ae90fff --- /dev/null +++ b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGeneratorConfig.java @@ -0,0 +1,42 @@ +package theodolite.commons.workloadgeneration; + +/** + * Configuration of a load generator. 
+ */ +public class LoadGeneratorConfig { + + private final MessageGenerator messageGenerator; + private BeforeAction beforeAction = BeforeAction.doNothing(); + private int threads = 1; + + public LoadGeneratorConfig(final MessageGenerator messageGenerator) { + this.messageGenerator = messageGenerator; + } + + public LoadGeneratorConfig( + final MessageGenerator messageGenerator, + final int threads) { + this.messageGenerator = messageGenerator; + this.threads = threads; + } + + public LoadGeneratorExecution buildLoadGeneratorExecution( + final WorkloadDefinition workloadDefinition) { + return new LoadGeneratorExecution(workloadDefinition, this.messageGenerator, this.threads); + } + + public BeforeAction getBeforeAction() { + return this.beforeAction; + } + + public void setThreads(final int threads) { + this.threads = threads; + } + + public void setBeforeAction(final BeforeAction beforeAction) { + this.beforeAction = beforeAction; + } + + + +} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGeneratorExecution.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGeneratorExecution.java new file mode 100644 index 0000000000000000000000000000000000000000..3934c3d3499215b37ce96391ff5ae1d5cc135f84 --- /dev/null +++ b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/LoadGeneratorExecution.java @@ -0,0 +1,56 @@ +package theodolite.commons.workloadgeneration; + +import java.util.Random; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A {@link LoadGeneratorExecution} represents the execution of load generator, i.e., it can be + * started and stopped. 
+ */ +public class LoadGeneratorExecution { + + private static final Logger LOGGER = LoggerFactory.getLogger(LoadGeneratorExecution.class); + + private final Random random = new Random(); + private final WorkloadDefinition workloadDefinition; + private final MessageGenerator messageGenerator; + private final ScheduledExecutorService executor; + + /** + * Create a new {@link LoadGeneratorExecution} for a given {@link WorkloadDefinition} and a + * {@link MessageGenerator}. Load is generated by the given number of threads. + */ + public LoadGeneratorExecution( + final WorkloadDefinition workloadDefinition, + final MessageGenerator messageGenerator, + final int threads) { + this.workloadDefinition = workloadDefinition; + this.messageGenerator = messageGenerator; + this.executor = Executors.newScheduledThreadPool(threads); + } + + /** + * Start the load generation and run it until it is stopped. + */ + public void start() { + LOGGER.info("Beginning of Experiment..."); + LOGGER.info("Generating records for {} keys.", + this.workloadDefinition.getKeySpace().getCount()); + LOGGER.info("Experiment is going to be executed until cancelation..."); + + final int periodMs = (int) this.workloadDefinition.getPeriod().toMillis(); + for (final String key : this.workloadDefinition.getKeySpace().getKeys()) { + final long initialDelay = this.random.nextInt(periodMs); + final Runnable task = () -> this.messageGenerator.generate(key); + this.executor.scheduleAtFixedRate(task, initialDelay, periodMs, TimeUnit.MILLISECONDS); + } + } + + public void stop() { + this.executor.shutdownNow(); + } +} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/MessageGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/MessageGenerator.java new file mode 100644 index 0000000000000000000000000000000000000000..c369f16557d60dae50e22ec7ad820c6a0ab4d137 --- /dev/null +++ 
b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/MessageGenerator.java @@ -0,0 +1,18 @@ +package theodolite.commons.workloadgeneration; + +/** + * Interface representing a message generator, which sends messages for given keys to some + * destination. + */ +@FunctionalInterface +public interface MessageGenerator { + + void generate(final String key); + + public static <T> MessageGenerator from( + final RecordGenerator<T> generator, + final RecordSender<T> sender) { + return key -> sender.send(generator.generate(key)); + } + +} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/RecordGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/RecordGenerator.java new file mode 100644 index 0000000000000000000000000000000000000000..ea6501f38ea57bf6cefb5c76b05f442454ca0d99 --- /dev/null +++ b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/RecordGenerator.java @@ -0,0 +1,14 @@ +package theodolite.commons.workloadgeneration; + +/** + * This interface describes a function that takes meta information from a string key and produces an + * object of type T. + * + * @param <T> the type of the objects that will be generated by the function. 
+ */ +@FunctionalInterface +public interface RecordGenerator<T> { + + T generate(final String key); + +} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/RecordSender.java similarity index 68% rename from benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java rename to benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/RecordSender.java index 7e5100a4e99f13a98156311a9d892c9626b2318a..ee57f2f239a34dd6f8f329d47e4d698427e371b0 100644 --- a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/Transport.java +++ b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/RecordSender.java @@ -1,4 +1,4 @@ -package theodolite.commons.workloadgeneration.functions; +package theodolite.commons.workloadgeneration; /** * This interface describes a function that consumes a message {@code T}. This function is dedicated @@ -7,8 +7,8 @@ package theodolite.commons.workloadgeneration.functions; * @param <T> the type of records to send as messages. 
*/ @FunctionalInterface -public interface Transport<T> { +public interface RecordSender<T> { - void transport(final T message); + void send(final T message); } diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/TitanMessageGeneratorFactory.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/TitanMessageGeneratorFactory.java new file mode 100644 index 0000000000000000000000000000000000000000..bd0b41d4e6e004d024ed2fd179eddcf6af50438f --- /dev/null +++ b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/TitanMessageGeneratorFactory.java @@ -0,0 +1,57 @@ +package theodolite.commons.workloadgeneration; + +import java.util.Properties; +import titan.ccp.model.records.ActivePowerRecord; + +/** + * A factory for creating {@link MessageGenerator}s that creates Titan {@link ActivePowerRecord}s + * and sends them via Kafka. + */ +public final class TitanMessageGeneratorFactory { + + private final RecordSender<ActivePowerRecord> recordSender; + + private TitanMessageGeneratorFactory(final RecordSender<ActivePowerRecord> recordSender) { + this.recordSender = recordSender; + } + + /** + * Create a {@link MessageGenerator} that generates Titan {@link ActivePowerRecord}s with a + * constant value. + */ + public MessageGenerator forConstantValue(final double value) { + return MessageGenerator.from( + sensor -> new ActivePowerRecord(sensor, System.currentTimeMillis(), value), + this.recordSender); + } + + /** + * Create a new TitanMessageGeneratorFactory for the given Kafka configuration. + */ + public static TitanMessageGeneratorFactory withKafkaConfig( + final String bootstrapServers, + final String topic, + final String schemaRegistryUrl) { + return withKafkaConfig(bootstrapServers, topic, schemaRegistryUrl, new Properties()); + } + + /** + * Create a new TitanMessageGeneratorFactory for the given Kafka configuration. 
+ */ + public static TitanMessageGeneratorFactory withKafkaConfig( + final String bootstrapServers, + final String topic, + final String schemaRegistryUrl, + final Properties properties) { + final KafkaRecordSender<ActivePowerRecord> kafkaRecordSender = KafkaRecordSender + .<ActivePowerRecord>builder( + bootstrapServers, + topic, + schemaRegistryUrl) + .keyAccessor(r -> r.getIdentifier()) + .timestampAccessor(r -> r.getTimestamp()) + .build(); + return new TitanMessageGeneratorFactory(kafkaRecordSender); + } + +} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/WorkloadDefinition.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/WorkloadDefinition.java new file mode 100644 index 0000000000000000000000000000000000000000..5795cad7a4d942476116f6453758aa2304b5eda0 --- /dev/null +++ b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/WorkloadDefinition.java @@ -0,0 +1,71 @@ +package theodolite.commons.workloadgeneration; + +import java.io.Serializable; +import java.time.Duration; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +/** + * Definition of a workload consisting of a {@link KeySpace} and a period with which messages will + * be generated for key of that {@link KeySpace}. + */ +public class WorkloadDefinition implements Serializable { + + private static final long serialVersionUID = -8337364281221817001L; // NOPMD + + private final KeySpace keySpace; + private final Duration period; + + /** + * Create a new workload definition. + * + * @param keySpace the key space to use. 
+ */ + public WorkloadDefinition( + final KeySpace keySpace, + final Duration period) { + this.keySpace = keySpace; + this.period = period; + } + + public KeySpace getKeySpace() { + return this.keySpace; + } + + public Duration getPeriod() { + return this.period; + } + + /** + * Divide this {@link WorkloadDefinition} into {@code parts} {@link WorkloadDefinition}s by + * distributing its {@link KeySpace} (almost) equally among all {@link WorkloadDefinition}s. + */ + public Set<WorkloadDefinition> divide(final int parts) { + final int effParts = Math.min(parts, this.keySpace.getCount()); + final int minSize = this.keySpace.getCount() / effParts; + final int largerParts = this.keySpace.getCount() % effParts; + return IntStream.range(0, effParts) + .mapToObj(part -> { + final int thisSize = part < largerParts ? minSize + 1 : minSize; + final int largePartsBefore = Math.min(largerParts, part); + final int smallPartsBefore = part - largePartsBefore; + final int start = largePartsBefore * (minSize + 1) + smallPartsBefore * minSize; + final int end = start + thisSize - 1; + return new KeySpace( + this.keySpace.getPrefix(), + start, + end); + }) + .map(keySpace -> new WorkloadDefinition( + keySpace, + this.period)) + .collect(Collectors.toUnmodifiableSet()); + } + + @Override + public String toString() { + return this.keySpace + ";" + this.period.toMillis(); + } + +} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java deleted file mode 100644 index 2249abcbcb1071cf880b2ee80f5d41f2b3dab463..0000000000000000000000000000000000000000 --- a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/communication/zookeeper/WorkloadDistributor.java +++ /dev/null @@ -1,202 +0,0 @@ -package 
theodolite.commons.workloadgeneration.communication.zookeeper; - -import java.nio.charset.StandardCharsets; -import java.util.function.BiConsumer; -import org.apache.curator.framework.CuratorFramework; -import org.apache.curator.framework.CuratorFrameworkFactory; -import org.apache.curator.framework.api.CuratorWatcher; -import org.apache.curator.framework.recipes.atomic.AtomicValue; -import org.apache.curator.framework.recipes.atomic.DistributedAtomicInteger; -import org.apache.curator.retry.ExponentialBackoffRetry; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher.Event.EventType; -import org.apache.zookeeper.data.Stat; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import theodolite.commons.workloadgeneration.dimensions.KeySpace; -import theodolite.commons.workloadgeneration.functions.BeforeAction; -import theodolite.commons.workloadgeneration.misc.WorkloadDefinition; -import theodolite.commons.workloadgeneration.misc.ZooKeeper; - -/** - * The central class responsible for distributing the workload through all workload generators. 
- */ -public class WorkloadDistributor { - - private static final Logger LOGGER = LoggerFactory.getLogger(WorkloadDistributor.class); - - private static final String NAMESPACE = "workload-generation"; - private static final String COUNTER_PATH = "/counter"; - private static final String WORKLOAD_PATH = "/workload"; - private static final String WORKLOAD_DEFINITION_PATH = "/workload/definition"; - - // Curator retry strategy - private static final int BASE_SLEEP_TIME_MS = 2000; - private static final int MAX_RETRIES = 5; - - // Wait time - private static final int MAX_WAIT_TIME = 20_000; - - private final DistributedAtomicInteger counter; - private final KeySpace keySpace; - private final BeforeAction beforeAction; - private final BiConsumer<WorkloadDefinition, Integer> workerAction; - - private final int instances; - private final ZooKeeper zooKeeper; // NOPMD keep instance variable instead of local variable - private final CuratorFramework client; - - private boolean workloadGenerationStarted = false; // NOPMD explicit intention that false - - /** - * Create a new workload distributor. - * - * @param keySpace the keyspace for the workload generation. - * @param beforeAction the before action for the workload generation. - * @param workerAction the action to perform by the workers. 
- */ - public WorkloadDistributor( - final int instances, - final ZooKeeper zooKeeper, - final KeySpace keySpace, - final BeforeAction beforeAction, - final BiConsumer<WorkloadDefinition, Integer> workerAction) { - this.instances = instances; - this.zooKeeper = zooKeeper; - this.keySpace = keySpace; - this.beforeAction = beforeAction; - this.workerAction = workerAction; - - this.client = CuratorFrameworkFactory.builder() - .namespace(NAMESPACE) - .connectString(this.zooKeeper.getHost() + ":" + this.zooKeeper.getPort()) - .retryPolicy(new ExponentialBackoffRetry(BASE_SLEEP_TIME_MS, MAX_RETRIES)) - .build(); - - this.client.start(); - - try { - this.client.blockUntilConnected(); - } catch (final InterruptedException e) { - LOGGER.error(e.getMessage(), e); - throw new IllegalStateException(e); - } - - this.counter = - new DistributedAtomicInteger(this.client, COUNTER_PATH, - new ExponentialBackoffRetry(BASE_SLEEP_TIME_MS, MAX_RETRIES)); - } - - /** - * Start the workload distribution. - */ - public void start() { - try { - AtomicValue<Integer> result = this.counter.increment(); - while (!result.succeeded()) { - result = this.counter.increment(); - } - - final int workerId = result.preValue(); - - final CuratorWatcher watcher = this.buildWatcher(workerId); - - final Stat nodeExists = - this.client.checkExists().creatingParentsIfNeeded().forPath(WORKLOAD_PATH); - if (nodeExists == null) { - this.client.create().forPath(WORKLOAD_PATH); - } - - if (workerId == 0) { - LOGGER.info("This instance is master with id {}", workerId); - - this.beforeAction.run(); - - // register worker action, as master acts also as worker - this.client.getChildren().usingWatcher(watcher).forPath(WORKLOAD_PATH); - - LOGGER.info("Number of Workers: {}", this.instances); - - final WorkloadDefinition definition = - new WorkloadDefinition(this.keySpace, this.instances); - - this.client.create().withMode(CreateMode.EPHEMERAL).forPath(WORKLOAD_DEFINITION_PATH, - 
definition.toString().getBytes(StandardCharsets.UTF_8)); - - } else { - LOGGER.info("This instance is worker with id {}", workerId); - - this.client.getChildren().usingWatcher(watcher).forPath(WORKLOAD_PATH); - - final Stat definitionExists = - this.client.checkExists().creatingParentsIfNeeded().forPath(WORKLOAD_DEFINITION_PATH); - - if (definitionExists != null) { - this.startWorkloadGeneration(workerId); - } - } - - Thread.sleep(MAX_WAIT_TIME); - - if (!this.workloadGenerationStarted) { - LOGGER.warn("No workload definition retrieved for 20 s. Terminating now.."); - } - } catch (final Exception e) { // NOPMD need to catch exception because of external framework - LOGGER.error(e.getMessage(), e); - throw new IllegalStateException("Error when starting the distribution of the workload.", e); - } - } - - /** - * Start the workload generation. This methods body does only get executed once. - * - * @param workerId the ID of this worker - * @throws Exception when an error occurs - */ - // NOPMD because exception thrown from used framework - private synchronized void startWorkloadGeneration(final int workerId) throws Exception { // NOPMD - - if (!this.workloadGenerationStarted) { - this.workloadGenerationStarted = true; - - final byte[] bytes = - this.client.getData().forPath(WORKLOAD_DEFINITION_PATH); - final WorkloadDefinition definition = - WorkloadDefinition.fromString(new String(bytes, StandardCharsets.UTF_8)); - - this.workerAction.accept(definition, workerId); - } - } - - /** - * Build a curator watcher which performs the worker action. - * - * @param worker the worker to create the watcher for. - * @return the curator watcher. 
- */ - private CuratorWatcher buildWatcher(final int workerId) { - return new CuratorWatcher() { - - @Override - public void process(final WatchedEvent event) { - if (event.getType() == EventType.NodeChildrenChanged) { - try { - WorkloadDistributor.this.startWorkloadGeneration(workerId); - } catch (final Exception e) { // NOPMD external framework throws exception - LOGGER.error(e.getMessage(), e); - throw new IllegalStateException("Error starting workload generation.", e); - } - } - } - }; - } - - /** - * Stop the workload distributor. - */ - public void stop() { - this.client.close(); - } - -} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java deleted file mode 100644 index 2eaa1d487f67ae8325a3622a7ae6c4529fbb1cd6..0000000000000000000000000000000000000000 --- a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/dimensions/KeySpace.java +++ /dev/null @@ -1,56 +0,0 @@ -package theodolite.commons.workloadgeneration.dimensions; - -import theodolite.commons.workloadgeneration.generators.AbstractWorkloadGenerator; - -/** - * Wrapper class for the definition of the Keys that should be used by the - * {@link AbstractWorkloadGenerator}. - */ -public class KeySpace { - - private final String prefix; - private final int min; - private final int max; - - - /** - * Create a new key space. All keys will have the prefix {@code prefix}. The remaining part of - * each key will be determined by a number of the interval ({@code min}, {@code max}-1). 
- * - * @param prefix the prefix to use for all keys - * @param min the lower bound (inclusive) to start counting from - * @param max the upper bound (exclusive) to count to - */ - public KeySpace(final String prefix, final int min, final int max) { - if (prefix == null || prefix.contains(";")) { - throw new IllegalArgumentException( - "The prefix must not be null and must not contain the ';' character."); - } - this.prefix = prefix; - this.min = min; - this.max = max; - - } - - public KeySpace(final String prefix, final int numberOfKeys) { - this(prefix, 0, numberOfKeys - 1); - } - - public KeySpace(final int numberOfKeys) { - this("sensor_", 0, numberOfKeys - 1); - } - - public String getPrefix() { - return this.prefix; - } - - - public int getMin() { - return this.min; - } - - - public int getMax() { - return this.max; - } -} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java deleted file mode 100644 index 672b579ebbdf3cbb08f3d05d9511c9077f9dac6b..0000000000000000000000000000000000000000 --- a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/functions/MessageGenerator.java +++ /dev/null @@ -1,14 +0,0 @@ -package theodolite.commons.workloadgeneration.functions; - -/** - * This interface describes a function that takes meta information from a string (e.g. an ID) and - * produces an object of type T. - * - * @param <T> the type of the objects that will be generated by the function. 
- */ -@FunctionalInterface -public interface MessageGenerator<T> { - - T generateMessage(final String key); - -} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java deleted file mode 100644 index 104f1cefb34200a2cf34d1578faecdfdae6ccd56..0000000000000000000000000000000000000000 --- a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/AbstractWorkloadGenerator.java +++ /dev/null @@ -1,138 +0,0 @@ -package theodolite.commons.workloadgeneration.generators; - -import java.time.Duration; -import java.util.LinkedList; -import java.util.List; -import java.util.Random; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.function.BiConsumer; -import java.util.function.BiFunction; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import theodolite.commons.workloadgeneration.communication.zookeeper.WorkloadDistributor; -import theodolite.commons.workloadgeneration.dimensions.KeySpace; -import theodolite.commons.workloadgeneration.functions.BeforeAction; -import theodolite.commons.workloadgeneration.functions.MessageGenerator; -import theodolite.commons.workloadgeneration.functions.Transport; -import theodolite.commons.workloadgeneration.misc.WorkloadDefinition; -import theodolite.commons.workloadgeneration.misc.WorkloadEntity; -import theodolite.commons.workloadgeneration.misc.ZooKeeper; - -/** - * Base for workload generators. - * - * @param <T> The type of records the workload generator is dedicated for. 
- */ -public abstract class AbstractWorkloadGenerator<T> - implements WorkloadGenerator { - - private static final Logger LOGGER = LoggerFactory.getLogger(AbstractWorkloadGenerator.class); - - private final int instances; // NOPMD keep instance variable instead of local variable - private final ZooKeeper zooKeeper; // NOPMD keep instance variable instead of local variable - private final KeySpace keySpace;// NOPMD keep instance variable instead of local variable - private final BeforeAction beforeAction; // NOPMD keep instance variable instead of local variable - private final BiFunction<WorkloadDefinition, Integer, List<WorkloadEntity<T>>> workloadSelector; - private final MessageGenerator<T> generatorFunction; - private final Transport<T> transport; - private WorkloadDistributor workloadDistributor; // NOPMD keep instance variable instead of local - private final ScheduledExecutorService executor; - - /** - * Create a new workload generator. - * - * @param instances the number of workload-generator instances. - * @param zooKeeper the zookeeper connection. - * @param keySpace the keyspace. - * @param threads the number of threads that is used to generate the load. - * @param period the period, how often a new record is emitted. - * @param duration the maximum runtime. - * @param beforeAction the action to perform before the workload generation starts. - * @param generatorFunction the function that is used to generate the individual records. - * @param transport the function that is used to send generated messages to the messaging system. 
- */ - public AbstractWorkloadGenerator( - final int instances, - final ZooKeeper zooKeeper, - final KeySpace keySpace, - final int threads, - final Duration period, - final Duration duration, - final BeforeAction beforeAction, - final MessageGenerator<T> generatorFunction, - final Transport<T> transport) { - this.instances = instances; - this.zooKeeper = zooKeeper; - this.keySpace = keySpace; - this.beforeAction = beforeAction; - this.generatorFunction = generatorFunction; - this.workloadSelector = (workloadDefinition, workerId) -> { - final List<WorkloadEntity<T>> workloadEntities = new LinkedList<>(); - - for (int i = - workloadDefinition.getKeySpace().getMin() + workerId; i <= workloadDefinition - .getKeySpace().getMax(); i += workloadDefinition.getNumberOfWorkers()) { - final String id = workloadDefinition.getKeySpace().getPrefix() + i; - workloadEntities.add(new WorkloadEntity<>(id, this.generatorFunction)); - } - - return workloadEntities; - }; - this.transport = transport; - - this.executor = Executors.newScheduledThreadPool(threads); - final Random random = new Random(); - - final int periodMs = (int) period.toMillis(); - - LOGGER.info("Period: {}", periodMs); - - final BiConsumer<WorkloadDefinition, Integer> workerAction = (declaration, workerId) -> { - - final List<WorkloadEntity<T>> entities = this.workloadSelector.apply(declaration, workerId); - - LOGGER.info("Beginning of Experiment..."); - LOGGER.info("Generating records for {} keys.", entities.size()); - LOGGER.info("Experiment is going to be executed for the specified duration..."); - - entities.forEach(entity -> { - final long initialDelay = random.nextInt(periodMs); - final Runnable task = () -> this.transport.transport(entity.generateMessage()); - this.executor.scheduleAtFixedRate(task, initialDelay, periodMs, TimeUnit.MILLISECONDS); - }); - - - try { - this.executor.awaitTermination(duration.getSeconds(), TimeUnit.SECONDS); - LOGGER.info("Terminating now..."); - this.stop(); - } catch (final 
InterruptedException e) { - LOGGER.error("", e); - throw new IllegalStateException("Error when terminating the workload generation.", e); - } - }; - - this.workloadDistributor = new WorkloadDistributor( - this.instances, - this.zooKeeper, - this.keySpace, - this.beforeAction, - workerAction); - } - - /** - * Start the workload generation. The generation terminates automatically after the specified - * {@code duration}. - */ - @Override - public void start() { - this.workloadDistributor.start(); - } - - @Override - public void stop() { - this.workloadDistributor.stop(); - } -} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java deleted file mode 100644 index 944cec6a2dffed886f06fad1e36c9d35375fe15c..0000000000000000000000000000000000000000 --- a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGenerator.java +++ /dev/null @@ -1,59 +0,0 @@ -package theodolite.commons.workloadgeneration.generators; - -import java.time.Duration; -import org.apache.avro.specific.SpecificRecord; -import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender; -import theodolite.commons.workloadgeneration.dimensions.KeySpace; -import theodolite.commons.workloadgeneration.functions.BeforeAction; -import theodolite.commons.workloadgeneration.functions.MessageGenerator; -import theodolite.commons.workloadgeneration.misc.ZooKeeper; - -/** - * Workload generator for generating load for the kafka messaging system. - * - * @param <T> The type of records the workload generator is dedicated for. 
- */ -public class KafkaWorkloadGenerator<T extends SpecificRecord> - extends AbstractWorkloadGenerator<T> { - - private final KafkaRecordSender<T> recordSender; - - /** - * Create a new workload generator. - * - * @param zooKeeper a reference to the ZooKeeper instance. - * @param keySpace the key space to generate the workload for. - * @param threads tha amount of threads to use per instance. - * @param period the period how often a message is generated for each key specified in the - * {@code keySpace} - * @param duration the duration how long the workload generator will emit messages. - * @param beforeAction the action which will be performed before the workload generator starts - * generating messages. If {@code null}, no before action will be performed. - * @param generatorFunction the generator function. This function is executed, each time a message - * is generated. - * @param recordSender the record sender which is used to send the generated messages to kafka. - */ - public KafkaWorkloadGenerator( - final int instances, - final ZooKeeper zooKeeper, - final KeySpace keySpace, - final int threads, - final Duration period, - final Duration duration, - final BeforeAction beforeAction, - final MessageGenerator<T> generatorFunction, - final KafkaRecordSender<T> recordSender) { - super(instances, zooKeeper, keySpace, threads, period, duration, beforeAction, - generatorFunction, - recordSender); - this.recordSender = recordSender; - } - - - @Override - public void stop() { - this.recordSender.terminate(); - - super.stop(); - } -} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java deleted file mode 100644 index 785087c13480b7149a5726dfce8bbf4307b57933..0000000000000000000000000000000000000000 --- 
a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/KafkaWorkloadGeneratorBuilder.java +++ /dev/null @@ -1,185 +0,0 @@ -package theodolite.commons.workloadgeneration.generators; - -import java.time.Duration; -import java.util.Objects; -import org.apache.avro.specific.SpecificRecord; -import theodolite.commons.workloadgeneration.communication.kafka.KafkaRecordSender; -import theodolite.commons.workloadgeneration.dimensions.KeySpace; -import theodolite.commons.workloadgeneration.functions.BeforeAction; -import theodolite.commons.workloadgeneration.functions.MessageGenerator; -import theodolite.commons.workloadgeneration.misc.ZooKeeper; - -/** - * Builder for {@link workload generators}. - * - * @param <T> the record for which the builder is dedicated for. - */ -public final class KafkaWorkloadGeneratorBuilder<T extends SpecificRecord> { // NOPMD - - private int instances; // NOPMD - private ZooKeeper zooKeeper; // NOPMD - private KeySpace keySpace; // NOPMD - private int threads; // NOPMD - private Duration period; // NOPMD - private Duration duration; // NOPMD - private BeforeAction beforeAction; // NOPMD - private MessageGenerator<T> generatorFunction; // NOPMD - private KafkaRecordSender<T> kafkaRecordSender; // NOPMD - - private KafkaWorkloadGeneratorBuilder() { - - } - - /** - * Get a builder for the {@link KafkaWorkloadGenerator}. - * - * @return the builder. - */ - public static <T extends SpecificRecord> KafkaWorkloadGeneratorBuilder<T> builder() { - return new KafkaWorkloadGeneratorBuilder<>(); - } - - /** - * Set the number of instances. - * - * @param instances the number of instances. - * @return the builder. - */ - public KafkaWorkloadGeneratorBuilder<T> instances(final int instances) { - this.instances = instances; - return this; - } - - /** - * Set the ZooKeeper reference. - * - * @param zooKeeper a reference to the ZooKeeper instance. - * @return the builder. 
- */ - public KafkaWorkloadGeneratorBuilder<T> zooKeeper(final ZooKeeper zooKeeper) { - this.zooKeeper = zooKeeper; - return this; - } - - /** - * Set the before action for the {@link KafkaWorkloadGenerator}. - * - * @param beforeAction the {@link BeforeAction}. - * @return the builder. - */ - public KafkaWorkloadGeneratorBuilder<T> beforeAction(final BeforeAction beforeAction) { - this.beforeAction = beforeAction; - return this; - } - - /** - * Set the key space for the {@link KafkaWorkloadGenerator}. - * - * @param keySpace the {@link KeySpace}. - * @return the builder. - */ - public KafkaWorkloadGeneratorBuilder<T> keySpace(final KeySpace keySpace) { - this.keySpace = keySpace; - return this; - } - - /** - * Set the key space for the {@link KafkaWorkloadGenerator}. - * - * @param threads the number of threads. - * @return the builder. - */ - public KafkaWorkloadGeneratorBuilder<T> threads(final int threads) { - this.threads = threads; - return this; - } - - /** - * Set the period for the {@link KafkaWorkloadGenerator}. - * - * @param period the {@link Period} - * @return the builder. - */ - public KafkaWorkloadGeneratorBuilder<T> period(final Duration period) { - this.period = period; - return this; - } - - /** - * Set the durtion for the {@link KafkaWorkloadGenerator}. - * - * @param duration the {@link Duration}. - * @return the builder. - */ - public KafkaWorkloadGeneratorBuilder<T> duration(final Duration duration) { - this.duration = duration; - return this; - } - - /** - * Set the generator function for the {@link KafkaWorkloadGenerator}. - * - * @param generatorFunction the generator function. - * @return the builder. - */ - public KafkaWorkloadGeneratorBuilder<T> generatorFunction( - final MessageGenerator<T> generatorFunction) { - this.generatorFunction = generatorFunction; - return this; - } - - /** - * Set the {@link KafkaRecordSender} for the {@link KafkaWorkloadGenerator}. - * - * @param kafkaRecordSender the record sender to use. 
- * @return the builder. - */ - public KafkaWorkloadGeneratorBuilder<T> kafkaRecordSender( - final KafkaRecordSender<T> kafkaRecordSender) { - this.kafkaRecordSender = kafkaRecordSender; - return this; - } - - /** - * Build the actual {@link KafkaWorkloadGenerator}. The following parameters are must be - * specicified before this method is called: - * <ul> - * <li>zookeeper</li> - * <li>key space</li> - * <li>period</li> - * <li>duration</li> - * <li>generator function</li> - * <li>kafka record sender</li> - * </ul> - * - * @return the built instance of the {@link KafkaWorkloadGenerator}. - */ - public KafkaWorkloadGenerator<T> build() { - if (this.instances < 1) { // NOPMD - throw new IllegalArgumentException( - "Please specify a valid number of instances. Currently: " + this.instances); - } - Objects.requireNonNull(this.zooKeeper, "Please specify the ZooKeeper instance."); - if (this.threads < 1) { // NOPMD - this.threads = 1; - } - Objects.requireNonNull(this.keySpace, "Please specify the key space."); - Objects.requireNonNull(this.period, "Please specify the period."); - Objects.requireNonNull(this.duration, "Please specify the duration."); - this.beforeAction = Objects.requireNonNullElse(this.beforeAction, () -> { - }); - Objects.requireNonNull(this.generatorFunction, "Please specify the generator function."); - Objects.requireNonNull(this.kafkaRecordSender, "Please specify the kafka record sender."); - - return new KafkaWorkloadGenerator<>( - this.instances, - this.zooKeeper, - this.keySpace, - this.threads, - this.period, - this.duration, - this.beforeAction, - this.generatorFunction, - this.kafkaRecordSender); - } -} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java deleted file mode 100644 index 
b121ac157b84d64818d9fdfc90589d49fd933752..0000000000000000000000000000000000000000 --- a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/generators/WorkloadGenerator.java +++ /dev/null @@ -1,18 +0,0 @@ -package theodolite.commons.workloadgeneration.generators; - -/** - * Base methods for workload generators. - */ -public interface WorkloadGenerator { - - /** - * Start the workload generation. - */ - void start(); - - /** - * Stop the workload generation. - */ - void stop(); - -} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java deleted file mode 100644 index 86369d6c883954b792b2ee0fd6a988377ecb8965..0000000000000000000000000000000000000000 --- a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadDefinition.java +++ /dev/null @@ -1,71 +0,0 @@ -package theodolite.commons.workloadgeneration.misc; - -import theodolite.commons.workloadgeneration.dimensions.KeySpace; - -/** - * The central class that contains all information that needs to be exchanged between the nodes for - * distributed workload generation. - */ -public class WorkloadDefinition { - private static final int ZERO = 0; - private static final int ONE = 1; - private static final int TWO = 2; - private static final int THREE = 3; - private static final int FOUR = 4; - - private final KeySpace keySpace; - private final int numberOfWorkers; - - /** - * Create a new workload definition. - * - * @param keySpace the key space to use. - * @param numberOfWorkers the number of workers participating in the workload generation. 
- */ - public WorkloadDefinition(final KeySpace keySpace, final int numberOfWorkers) { - - this.keySpace = keySpace; - this.numberOfWorkers = numberOfWorkers; - } - - public KeySpace getKeySpace() { - return this.keySpace; - } - - public int getNumberOfWorkers() { - return this.numberOfWorkers; - } - - /** - * Simple method for encoding all information of the workload definition into one string. - * - * @return a string that encodes all information of the workload generation in a compact format. - * The format is 'keySpace;keySpace.min;keySpace.max;numberOfWorkers'. - */ - @Override - public String toString() { - return this.getKeySpace().getPrefix() + ";" + this.getKeySpace().getMin() + ";" - + this.getKeySpace().getMax() + ";" + this.getNumberOfWorkers(); - } - - /** - * Parse a workload generation from a previously encoded string with the format returned by - * {@link WorkloadDefinition#toString()}. - * - * @param workloadDefinitionString the workload definition string. - * @return the parsed workload definition. 
- */ - public static WorkloadDefinition fromString(final String workloadDefinitionString) { - final String[] deserialized = workloadDefinitionString.split(";"); - - if (deserialized.length != FOUR) { - throw new IllegalArgumentException( - "Wrong workload definition string when trying to parse the workload generation."); - } - - return new WorkloadDefinition( - new KeySpace(deserialized[ZERO], Integer.valueOf(deserialized[ONE]), - Integer.valueOf(deserialized[TWO])), - Integer.valueOf(deserialized[THREE])); - } -} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java deleted file mode 100644 index d8665b3fb53e7d15ed61780e3b91fbfe56f709ba..0000000000000000000000000000000000000000 --- a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/WorkloadEntity.java +++ /dev/null @@ -1,22 +0,0 @@ -package theodolite.commons.workloadgeneration.misc; - -import theodolite.commons.workloadgeneration.functions.MessageGenerator; - -/** - * Representation of a entity of the workload generation that generates load for one fixed key. - * - * @param <T> The type of records the workload generator is dedicated for. 
- */ -public class WorkloadEntity<T> { - private final String key; - private final MessageGenerator<T> generator; - - public WorkloadEntity(final String key, final MessageGenerator<T> generator) { - this.key = key; - this.generator = generator; - } - - public T generateMessage() { - return this.generator.generateMessage(this.key); - } -} diff --git a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java b/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java deleted file mode 100644 index a80490600ad9c9c22c198fc76b6d9f73bdc30584..0000000000000000000000000000000000000000 --- a/benchmarks/workload-generator-commons/src/main/java/theodolite/commons/workloadgeneration/misc/ZooKeeper.java +++ /dev/null @@ -1,29 +0,0 @@ -package theodolite.commons.workloadgeneration.misc; - -/** - * Wrapper for connection information for ZooKeeper. - */ -public class ZooKeeper { - - private final String host; - private final int port; - - /** - * Create a new representation of an ZooKeeper instance. - * - * @param host of zookeeper. - * @param port of zookeeper. 
- */ - public ZooKeeper(final String host, final int port) { - this.host = host; - this.port = port; - } - - public String getHost() { - return this.host; - } - - public int getPort() { - return this.port; - } -} diff --git a/benchmarks/workload-generator-commons/src/test/java/theodolite/commons/workloadgeneration/KeySpaceTest.java b/benchmarks/workload-generator-commons/src/test/java/theodolite/commons/workloadgeneration/KeySpaceTest.java new file mode 100644 index 0000000000000000000000000000000000000000..20c094ddcc7ff110a25aaffa494766e89d4d2475 --- /dev/null +++ b/benchmarks/workload-generator-commons/src/test/java/theodolite/commons/workloadgeneration/KeySpaceTest.java @@ -0,0 +1,30 @@ +package theodolite.commons.workloadgeneration; + +import org.junit.Assert; +import org.junit.Test; +import theodolite.commons.workloadgeneration.KeySpace; + +public class KeySpaceTest { + + @Test + public void testCountFixedRangeFromZero() { + final KeySpace keySpace = new KeySpace("prefix", 0, 9); + final int count = keySpace.getCount(); + Assert.assertEquals(10, count); + } + + @Test + public void testCountFixedRangeNotFromZero() { + final KeySpace keySpace = new KeySpace("prefix", 4, 11); + final int count = keySpace.getCount(); + Assert.assertEquals(8, count); + } + + @Test + public void testCountAutoRange() { + final KeySpace keySpace = new KeySpace("prefix", 42); + final int count = keySpace.getCount(); + Assert.assertEquals(42, count); + } + +} diff --git a/benchmarks/workload-generator-commons/src/test/java/theodolite/commons/workloadgeneration/WorkloadDefinitionTest.java b/benchmarks/workload-generator-commons/src/test/java/theodolite/commons/workloadgeneration/WorkloadDefinitionTest.java new file mode 100644 index 0000000000000000000000000000000000000000..9a5dbf2d20e9e33b5902e5f352dc8a4023478cdf --- /dev/null +++ b/benchmarks/workload-generator-commons/src/test/java/theodolite/commons/workloadgeneration/WorkloadDefinitionTest.java @@ -0,0 +1,97 @@ +package 
theodolite.commons.workloadgeneration; + +import java.time.Duration; +import java.util.Comparator; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import org.junit.Assert; +import org.junit.Test; + +public class WorkloadDefinitionTest { + + @Test + public void testDivideByOneAmount() { + final KeySpace keySpace = new KeySpace("prefix", 100); + final WorkloadDefinition workload = new WorkloadDefinition(keySpace, Duration.ofSeconds(1)); + final Set<WorkloadDefinition> subworkloads = workload.divide(1); + Assert.assertEquals(1, subworkloads.size()); + } + + @Test + public void testDivideMultipleAmount() { + final KeySpace keySpace = new KeySpace("prefix", 100); + final WorkloadDefinition workload = new WorkloadDefinition(keySpace, Duration.ofSeconds(1)); + final Set<WorkloadDefinition> subworkloads = workload.divide(2); + Assert.assertEquals(2, subworkloads.size()); + } + + @Test + public void testDivideNonMultipleAmount() { + final KeySpace keySpace = new KeySpace("prefix", 100); + final WorkloadDefinition workload = new WorkloadDefinition(keySpace, Duration.ofSeconds(1)); + final Set<WorkloadDefinition> subworkloads = workload.divide(3); + Assert.assertEquals(3, subworkloads.size()); + } + + @Test + public void testDivide() { + final KeySpace keySpace = new KeySpace("prefix", 100); + final WorkloadDefinition workload = new WorkloadDefinition(keySpace, Duration.ofSeconds(1)); + final Set<WorkloadDefinition> subworkloads = workload.divide(3); + Assert.assertEquals(3, subworkloads.size()); + for (final WorkloadDefinition subworkload : subworkloads) { + Assert.assertEquals("prefix", subworkload.getKeySpace().getPrefix()); + Assert.assertEquals(Duration.ofSeconds(1), subworkload.getPeriod()); + } + final List<WorkloadDefinition> orderedSubworkloads = subworkloads.stream() + .sorted(Comparator.comparingInt(l -> l.getKeySpace().getMin())) + .collect(Collectors.toList()); + final WorkloadDefinition subworkload1 = 
orderedSubworkloads.get(0); + Assert.assertEquals(0, subworkload1.getKeySpace().getMin()); + Assert.assertEquals(33, subworkload1.getKeySpace().getMax()); + final WorkloadDefinition subworkload2 = orderedSubworkloads.get(1); + Assert.assertEquals(34, subworkload2.getKeySpace().getMin()); + Assert.assertEquals(66, subworkload2.getKeySpace().getMax()); + final WorkloadDefinition subworkload3 = orderedSubworkloads.get(2); + Assert.assertEquals(67, subworkload3.getKeySpace().getMin()); + Assert.assertEquals(99, subworkload3.getKeySpace().getMax()); + } + + @Test + public void testDivideMany() { + final KeySpace keySpace = new KeySpace("prefix", 10); + final WorkloadDefinition workload = new WorkloadDefinition(keySpace, Duration.ofSeconds(1)); + final Set<WorkloadDefinition> subworkloads = workload.divide(7); + Assert.assertEquals(7, subworkloads.size()); + for (final WorkloadDefinition subworkload : subworkloads) { + Assert.assertEquals("prefix", subworkload.getKeySpace().getPrefix()); + Assert.assertEquals(Duration.ofSeconds(1), subworkload.getPeriod()); + } + final List<WorkloadDefinition> orderedSubworkloads = subworkloads.stream() + .sorted(Comparator.comparingInt(l -> l.getKeySpace().getMin())) + .collect(Collectors.toList()); + final WorkloadDefinition subworkload1 = orderedSubworkloads.get(0); + Assert.assertEquals(0, subworkload1.getKeySpace().getMin()); + Assert.assertEquals(1, subworkload1.getKeySpace().getMax()); + final WorkloadDefinition subworkload2 = orderedSubworkloads.get(1); + Assert.assertEquals(2, subworkload2.getKeySpace().getMin()); + Assert.assertEquals(3, subworkload2.getKeySpace().getMax()); + final WorkloadDefinition subworkload3 = orderedSubworkloads.get(2); + Assert.assertEquals(4, subworkload3.getKeySpace().getMin()); + Assert.assertEquals(5, subworkload3.getKeySpace().getMax()); + final WorkloadDefinition subworkload4 = orderedSubworkloads.get(3); + Assert.assertEquals(6, subworkload4.getKeySpace().getMin()); + Assert.assertEquals(6, 
subworkload4.getKeySpace().getMax()); + final WorkloadDefinition subworkload5 = orderedSubworkloads.get(4); + Assert.assertEquals(7, subworkload5.getKeySpace().getMin()); + Assert.assertEquals(7, subworkload5.getKeySpace().getMax()); + final WorkloadDefinition subworkload6 = orderedSubworkloads.get(5); + Assert.assertEquals(8, subworkload6.getKeySpace().getMin()); + Assert.assertEquals(8, subworkload6.getKeySpace().getMax()); + final WorkloadDefinition subworkload7 = orderedSubworkloads.get(6); + Assert.assertEquals(9, subworkload7.getKeySpace().getMin()); + Assert.assertEquals(9, subworkload7.getKeySpace().getMax()); + } + +} diff --git a/codemeta.json b/codemeta.json new file mode 100644 index 0000000000000000000000000000000000000000..eff1f1ba4f3c9a70a46c3cf83c47c279e1838cf9 --- /dev/null +++ b/codemeta.json @@ -0,0 +1,34 @@ +{ + "@context": "https://doi.org/10.5063/schema/codemeta-2.0", + "@type": "SoftwareSourceCode", + "license": "https://spdx.org/licenses/Apache-2.0", + "codeRepository": "https://github.com/cau-se/theodolite", + "dateCreated": "2020-03-13", + "datePublished": "2020-07-27", + "dateModified": "2021-02-11", + "downloadUrl": "https://github.com/cau-se/theodolite/releases", + "name": "Theodolite", + "version": "0.3.0", + "description": "Theodolite is a framework for benchmarking the horizontal and vertical scalability of stream processing engines.", + "developmentStatus": "active", + "referencePublication": "https://doi.org/10.1016/j.bdr.2021.100209", + "programmingLanguage": [ + "Python", + "Java" + ], + "runtimePlatform": [ + "Kubernetes" + ], + "author": [ + { + "@type": "Person", + "givenName": "Sören", + "familyName": "Henning", + "email": "soeren.henning@email.uni-kiel.de", + "affiliation": { + "@type": "Organization", + "name": "Department of Computer Science, Kiel University" + } + } + ] +} diff --git a/docker-test/uc1-docker-compose/docker-compose.yml b/docker-test/uc1-docker-compose/docker-compose.yml index 
905e6e30bfd38900e896be45d8a4b15389b2f54f..cdc9df40257362934a93fcbe2de24b6035d40bca 100755 --- a/docker-test/uc1-docker-compose/docker-compose.yml +++ b/docker-test/uc1-docker-compose/docker-compose.yml @@ -26,8 +26,8 @@ services: - kafka expose: - "8081" - ports: - - 8081:8081 + #ports: + # - 8081:8081 environment: SCHEMA_REGISTRY_HOST_NAME: schema-registry SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181' @@ -38,16 +38,15 @@ services: - kafka environment: KAFKA_BOOTSTRAP_SERVERS: kafka:9092 + SCHEMA_REGISTRY_URL: http://schema-registry:8081 uc-wg: image: theodolite/theodolite-uc1-workload-generator:latest depends_on: - schema-registry - kafka - - zookeeper environment: - ZK_HOST: zookeeper - ZK_PORT: 2181 + BOOTSTRAP_SERVER: uc-wg:5701 + PORT: 5701 KAFKA_BOOTSTRAP_SERVERS: kafka:9092 SCHEMA_REGISTRY_URL: http://schema-registry:8081 - INSTANCES: 1 - NUM_SENSORS: 1 + NUM_SENSORS: 10 diff --git a/docker-test/uc2-docker-compose/docker-compose.yml b/docker-test/uc2-docker-compose/docker-compose.yml index e6511bfd9fa7ea1e62bf9f3787ac6f3c0acc0107..613553fcfa53122205b6e58d85fb7225eae90d7c 100755 --- a/docker-test/uc2-docker-compose/docker-compose.yml +++ b/docker-test/uc2-docker-compose/docker-compose.yml @@ -1,17 +1,18 @@ version: '2' services: zookeeper: + #image: wurstmeister/zookeeper image: confluentinc/cp-zookeeper - expose: - - "9092" + ports: + - "2181:2181" environment: ZOOKEEPER_CLIENT_PORT: 2181 kafka: image: wurstmeister/kafka expose: - "9092" - ports: - - 19092:19092 + #ports: + # - 19092:19092 environment: KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092 KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092 @@ -24,10 +25,10 @@ services: depends_on: - zookeeper - kafka + #ports: + # - "8081:8081" expose: - "8081" - ports: - - 8081:8081 environment: SCHEMA_REGISTRY_HOST_NAME: schema-registry SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181' @@ -38,16 +39,16 @@ services: - kafka 
environment: KAFKA_BOOTSTRAP_SERVERS: kafka:9092 + SCHEMA_REGISTRY_URL: http://schema-registry:8081 + KAFKA_WINDOW_DURATION_MINUTES: 60 uc-wg: image: theodolite/theodolite-uc2-workload-generator:latest depends_on: - schema-registry - kafka - - zookeeper environment: - ZK_HOST: zookeeper - ZK_PORT: 2181 + BOOTSTRAP_SERVER: uc-wg:5701 + PORT: 5701 KAFKA_BOOTSTRAP_SERVERS: kafka:9092 SCHEMA_REGISTRY_URL: http://schema-registry:8081 - INSTANCES: 1 - NUM_SENSORS: 1 \ No newline at end of file + NUM_SENSORS: 10 diff --git a/docker-test/uc3-docker-compose/docker-compose.yml b/docker-test/uc3-docker-compose/docker-compose.yml index 9d2da8e87621c1902ff101efd42ff52436416b77..d321318b4024b678cf8f37007e90dc62a2042ece 100755 --- a/docker-test/uc3-docker-compose/docker-compose.yml +++ b/docker-test/uc3-docker-compose/docker-compose.yml @@ -9,12 +9,10 @@ services: ZOOKEEPER_CLIENT_PORT: 2181 kafka: image: wurstmeister/kafka - ports: - - "9092:9092" expose: - "9092" - ports: - - 19092:19092 + #ports: + # - 19092:19092 environment: KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092 KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092 @@ -27,8 +25,8 @@ services: depends_on: - zookeeper - kafka - ports: - - "8081:8081" + #ports: + # - "8081:8081" expose: - "8081" environment: @@ -42,17 +40,14 @@ services: environment: KAFKA_BOOTSTRAP_SERVERS: kafka:9092 SCHEMA_REGISTRY_URL: http://schema-registry:8081 - KAFKA_WINDOW_DURATION_MINUTES: 60 uc-wg: image: theodolite/theodolite-uc3-workload-generator:latest depends_on: - schema-registry - kafka - - zookeeper environment: - ZK_HOST: zookeeper - ZK_PORT: 2181 + BOOTSTRAP_SERVER: uc-wg:5701 + PORT: 5701 KAFKA_BOOTSTRAP_SERVERS: kafka:9092 SCHEMA_REGISTRY_URL: http://schema-registry:8081 - INSTANCES: 1 - NUM_SENSORS: 1 \ No newline at end of file + NUM_SENSORS: 10 diff --git a/docker-test/uc4-docker-compose/docker-compose.yml b/docker-test/uc4-docker-compose/docker-compose.yml index 
530852b2df5ef2c70f03a11ac2445ce587a3760f..d478d74e55a1b5423a390c624848b20f5faf2969 100755 --- a/docker-test/uc4-docker-compose/docker-compose.yml +++ b/docker-test/uc4-docker-compose/docker-compose.yml @@ -1,18 +1,17 @@ version: '2' services: zookeeper: - #image: wurstmeister/zookeeper image: confluentinc/cp-zookeeper - ports: - - "2181:2181" + expose: + - "2181" environment: ZOOKEEPER_CLIENT_PORT: 2181 kafka: image: wurstmeister/kafka expose: - "9092" - ports: - - 19092:19092 + #ports: + # - 19092:19092 environment: KAFKA_LISTENERS: PLAINTEXT://:9092,CONNECTIONS_FROM_HOST://:19092 KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:9092,CONNECTIONS_FROM_HOST://localhost:19092 @@ -25,10 +24,10 @@ services: depends_on: - zookeeper - kafka - ports: - - "8081:8081" expose: - "8081" + #ports: + # - 8081:8081 environment: SCHEMA_REGISTRY_HOST_NAME: schema-registry SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: 'zookeeper:2181' @@ -45,11 +44,10 @@ services: depends_on: - schema-registry - kafka - - zookeeper environment: - ZK_HOST: zookeeper - ZK_PORT: 2181 + BOOTSTRAP_SERVER: uc-wg:5701 + PORT: 5701 KAFKA_BOOTSTRAP_SERVERS: kafka:9092 SCHEMA_REGISTRY_URL: http://schema-registry:8081 - INSTANCES: 1 - NUM_SENSORS: 100 + NUM_SENSORS: 4 + NUM_NESTED_GROUPS: 4 \ No newline at end of file diff --git a/docs/release-process.md b/docs/release-process.md index c53ea4423eb1dbf521d13286448f33a9613b71ef..961106247fd0967a2dd6ffdd980e35235ceed168 100644 --- a/docs/release-process.md +++ b/docs/release-process.md @@ -6,18 +6,25 @@ nav_order: 2 # Release Process -We assume that we are creating the release `v0.1.1`. Please make sure to adjust +This document describes how to perform a new Theodolite release. + +We assume that we are creating the release `v0.3.1`. Please make sure to adjust the following steps according to the release, you are actually performing. -1. Create a new branch `v0.1` if not already exists. This branch will never +1. Update `codemeta.json` to match the new version. 
In particular, make sure that `version` points to the version you are releasing and `dateModified` points to the date you are relasing this version. [CodeMeata generator](https://codemeta.github.io/codemeta-generator/) may help you in updating the file. + +2. Update `CITATION.cff` to match the new version. At least update the `version` field. + +3. Create a new branch `v0.3` if it does not already exists. This branch will never again be merged into master. -2. Checkout the `v0.1` branch. +4. Checkout the `v0.3` branch. + +5. Update all references to Theodolite Docker images to tag `v0.3.1`. These are the Kubernetes resource definitions in +`execution`, the references to *latest* in `run_uc.py`, the Docker Compose files in `docker-test` and the example `theodolite.yaml` job. -3. Update all references to Theodolite Docker images to tag `v0.1.1`. These are -mainly the Kubernetes resource definitions in `execution` as well as the Docker -Compose files in `docker-test`. +6. Commit these changes. -4. Commit these changes. +7. Tag this commit with `v0.3.1`. The corresponding Docker images will be uploaded. -5. Tag this commit with `v0.1.1`. The corresponding Docker images will be uploaded. +8. Create *releases* for this tag in both, GitLab and GitHub. diff --git a/execution/.gitlab-ci.yml b/execution/.gitlab-ci.yml deleted file mode 100644 index 5577de7a083708a6bb9b83571f458e2c1fbfb340..0000000000000000000000000000000000000000 --- a/execution/.gitlab-ci.yml +++ /dev/null @@ -1,61 +0,0 @@ -stages: - - deploy - -deploy: - stage: deploy - tags: - - exec-dind - image: docker:19.03.1 - services: - - docker:19.03.1-dind - variables: - DOCKER_TLS_CERTDIR: "/certs" - script: - - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//') - - docker build --pull -t theodolite ./execution - - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite $DOCKERHUB_ORG/theodolite:${DOCKER_TAG_NAME}latest" - - "[ ! 
$CI_COMMIT_TAG ] && docker tag theodolite $DOCKERHUB_ORG/theodolite:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA" - - "[ $CI_COMMIT_TAG ] && docker tag theodolite $DOCKERHUB_ORG/theodolite:$CI_COMMIT_TAG" - - echo $DOCKERHUB_PW | docker login -u $DOCKERHUB_ID --password-stdin - - docker push $DOCKERHUB_ORG/theodolite - - docker logout - rules: - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW && $CI_COMMIT_TAG" - when: always - - changes: - - execution/**/* - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW" - when: always - - if: "$DOCKERHUB_ORG && $DOCKERHUB_ID && $DOCKERHUB_PW" - when: manual - allow_failure: true - -deploy-ghcr: - stage: deploy - tags: - - exec-dind - image: docker:19.03.1 - services: - - docker:19.03.1-dind - variables: - DOCKER_TLS_CERTDIR: "/certs" - script: - - DOCKER_TAG_NAME=$(echo $CI_COMMIT_REF_SLUG- | sed 's/^master-$//') - - docker build --pull -t theodolite ./execution - - "[ ! $CI_COMMIT_TAG ] && docker tag theodolite ghcr.io/$GITHUB_CR_ORG/theodolite:${DOCKER_TAG_NAME}latest" - - "[ ! 
$CI_COMMIT_TAG ] && docker tag theodolite ghcr.io/$GITHUB_CR_ORG/theodolite:$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA" - - "[ $CI_COMMIT_TAG ] && docker tag theodolite ghcr.io/$GITHUB_CR_ORG/theodolite:$CI_COMMIT_TAG" - - echo $GITHUB_CR_TOKEN | docker login ghcr.io -u $GITHUB_CR_USER --password-stdin - - docker push ghcr.io/$GITHUB_CR_ORG/theodolite - - docker logout - rules: - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN && $CI_COMMIT_TAG" - when: always - - changes: - - execution/**/* - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN" - when: always - - if: "$GITHUB_CR_ORG && $GITHUB_CR_USER && $GITHUB_CR_TOKEN" - when: manual - allow_failure: true - \ No newline at end of file diff --git a/execution/README.md b/execution/README.md index 358ce270400d1e4e4947a8ef736feac74c314163..442f1c71929f9c7367909ce6609c9122faf3e814 100644 --- a/execution/README.md +++ b/execution/README.md @@ -84,7 +84,7 @@ not want to deploy 10 Kafka and 3 Zookeeper instances, alter the configuration file accordingly. To install the patched Confluent's Kafka with our configuration: ```sh -helm install my-confluent https://github.com/SoerenHenning/cp-helm-charts/releases/download/v6.0.1-1-JMX-FIX/cp-helm-charts-0.6.0.tgz -f infrastructure/kafka/values.yaml +helm install my-confluent https://github.com/SoerenHenning/cp-helm-charts/releases/download/v6.0.1-1-JMX-FIX-2/cp-helm-charts-0.6.0.tgz -f infrastructure/kafka/values.yaml ``` To let Prometheus scrape Kafka metrics, deploy a ServiceMonitor: @@ -121,7 +121,9 @@ can be installed via Helm. 
We also provide a [default configuration](infrastruct To install it: ```sh -helm install kafka-lag-exporter https://github.com/lightbend/kafka-lag-exporter/releases/download/v0.6.3/kafka-lag-exporter-0.6.3.tgz -f infrastructure/kafka-lag-exporter/values.yaml +helm repo add kafka-lag-exporter https://lightbend.github.io/kafka-lag-exporter/repo/ +helm repo update +helm install kafka-lag-exporter kafka-lag-exporter/kafka-lag-exporter -f infrastructure/kafka-lag-exporter/values.yaml ``` ### Installing Theodolite @@ -168,18 +170,47 @@ access (e.g. via SSH) to one of your cluster nodes. You first need to create a directory on a selected node where all benchmark results should be stored. Next, modify `infrastructure/kubernetes/volume-local.yaml` by setting `<node-name>` to your selected node. (This node will most likely also execute the [Theodolite job](#Execution).) Further, you have to set `path` to the directory on the node you just created. To deploy -you volume run: +your volume run: ```sh kubectl apply -f infrastructure/kubernetes/volume-local.yaml ``` +##### *Oracle Cloud Infrastructure* volume + +When you are running in the Oracle Cloud, you can provision a persistent volume claim by attaching a volume from the +Oracle Cloud Infrastructure Block Volume service. To create your volume, run: + +```sh +kubectl apply -f infrastructure/kubernetes/volume-oci.yaml +``` + +More information can be found in the official documentation: +[Oracle Cloud Infrastructure: Creating a Persistent Volume Claim](https://docs.oracle.com/en-us/iaas/Content/ContEng/Tasks/contengcreatingpersistentvolumeclaim.htm) + ##### Other volumes -To use volumes provided by public cloud providers or network-based file systems, you can use the definitions in +To use volumes provided by other public cloud providers or network-based file systems, you can use the definitions in `infrastructure/kubernetes/` as a starting point. 
See the offical [volumes documentation](https://kubernetes.io/docs/concepts/storage/volumes/) for additional information. +##### Accessing benchmark results via Kubernetes + +In cases where you do not have direct access to the underlying storage infrasturcture of your volume (e.g., if your +admin configures a local or hostPath volume for you and you do not have SSH access to the node), you can deploy our +Theodolite results access deployment: + +```sh +kubectl apply -f infrastructure/kubernetes/volume-access.yaml +``` + +It allows you to browse the benchmark results or copy files your Kubernetes client via the following commands: + +```sh +kubectl exec -it $(kubectl get pods -o=name -l app=theodolite-results-access) -- sh +kubectl cp $(kubectl get pods --no-headers -o custom-columns=":metadata.name" -l app=theodolite-results-access):app/results <target-dir> +``` + ## Execution @@ -216,11 +247,12 @@ Kubernetes volume. | --duration | DURATION | Duration in minutes subexperiments should be executed for. *Default:* `5`. | | --partitions | PARTITIONS | Number of partitions for Kafka topics. *Default:* `40`. | | --cpu-limit | CPU_LIMIT | Kubernetes CPU limit for a single Pod. *Default:* `1000m`. | -| --memory-limiT | MEMORY_LIMIT | Kubernetes memory limit for a single Pod. *Default:* `4Gi`. | +| --memory-limit | MEMORY_LIMIT | Kubernetes memory limit for a single Pod. *Default:* `4Gi`. | | --domain-restriction | DOMAIN_RESTRICTION | A flag that indiciates domain restriction should be used. *Default:* not set. For more details see Section [Domain Restriction](#domain-restriction). | | --search-strategy | SEARCH_STRATEGY | The benchmarking search strategy. Can be set to `check-all`, `linear-search` or `binary-search`. *Default:* `check-all`. For more details see Section [Benchmarking Search Strategies](#benchmarking-search-strategies). | | --reset | RESET | Resets the environment before each subexperiment. 
Useful if execution was aborted and just one experiment should be executed. | | --reset-only | RESET_ONLY | Only resets the environment. Ignores all other parameters. Useful if execution was aborted and one want a clean state for new executions. | +| --namespace | NAMESPACE | Kubernetes namespace. *Default:* `default`. | | --prometheus | PROMETHEUS_BASE_URL | Defines where to find the prometheus instance. *Default:* `http://localhost:9090` | | --path | RESULT_PATH | A directory path for the results. Relative to the Execution folder. *Default:* `results` | | --configurations | CONFIGURATIONS | Defines environment variables for the use cases and, thus, enables further configuration options. | diff --git a/execution/infrastructure/kubernetes/volume-access.yaml b/execution/infrastructure/kubernetes/volume-access.yaml new file mode 100644 index 0000000000000000000000000000000000000000..54c996160726504b0965af791c74cff11a860c8e --- /dev/null +++ b/execution/infrastructure/kubernetes/volume-access.yaml @@ -0,0 +1,30 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: theodolite-results-access + labels: + app: theodolite-results-access +spec: + replicas: 1 + selector: + matchLabels: + app: theodolite-results-access + template: + metadata: + labels: + app: theodolite-results-access + spec: + containers: + - name: theodolite-results-access + image: busybox:latest + command: + - sh + - -c + - exec tail -f /dev/null + volumeMounts: + - mountPath: /app/results + name: theodolite-pv-storage + volumes: + - name: theodolite-pv-storage + persistentVolumeClaim: + claimName: theodolite-pv-claim diff --git a/execution/infrastructure/kubernetes/volume-oci-access.yaml b/execution/infrastructure/kubernetes/volume-oci-access.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7c129600f8c6168f06ddcf2865ff29bc4e3c942c --- /dev/null +++ b/execution/infrastructure/kubernetes/volume-oci-access.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: 
theodolite-results-access +spec: + restartPolicy: Always + containers: + - name: theodolite-results-access + image: busybox:latest + command: + - sh + - -c + - exec tail -f /dev/null + volumeMounts: + - mountPath: /app/results + name: theodolite-pv-storage + volumes: + - name: theodolite-pv-storage + persistentVolumeClaim: + claimName: theodolite-pv-claim diff --git a/execution/infrastructure/kubernetes/volume-oci.yaml b/execution/infrastructure/kubernetes/volume-oci.yaml new file mode 100644 index 0000000000000000000000000000000000000000..39d267011661b56021f7e716d860ab427608ed05 --- /dev/null +++ b/execution/infrastructure/kubernetes/volume-oci.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: theodolite-pv-claim +spec: + storageClassName: "oci-bv" + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi diff --git a/execution/infrastructure/prometheus/helm-values.yaml b/execution/infrastructure/prometheus/helm-values.yaml index bf503fe483e918ac7a6a7dc8722ea06cfd3aef6c..a356a455a14238c1aeb97cbe022a69715a5cbd97 100644 --- a/execution/infrastructure/prometheus/helm-values.yaml +++ b/execution/infrastructure/prometheus/helm-values.yaml @@ -36,6 +36,9 @@ nodeExporter: prometheusOperator: enabled: true + namespaces: + releaseNamespace: true + additional: [] prometheus: enabled: false diff --git a/execution/run_uc.py b/execution/run_uc.py index a0fcdbb6d57e5dc67d18e69b7d07fcdbfa809307..904b87b377ca2db3f2d4ddd4fb70aba0136cfa21 100644 --- a/execution/run_uc.py +++ b/execution/run_uc.py @@ -94,14 +94,15 @@ def load_yaml_files(): :return: wg, app_svc, app_svc_monitor ,app_jmx, app_deploy """ print('Load kubernetes yaml files') - wg = load_yaml('uc-workload-generator/base/workloadGenerator.yaml') - app_svc = load_yaml('uc-application/base/aggregation-service.yaml') - app_svc_monitor = load_yaml('uc-application/base/service-monitor.yaml') - app_jmx = load_yaml('uc-application/base/jmx-configmap.yaml') - app_deploy = 
load_yaml('uc-application/base/aggregation-deployment.yaml') + wg_svc = load_yaml('uc-workload-generator/load-generator-service.yaml') + wg = load_yaml('uc-workload-generator/workloadGenerator.yaml') + app_svc = load_yaml('uc-application/aggregation-service.yaml') + app_svc_monitor = load_yaml('uc-application/service-monitor.yaml') + app_jmx = load_yaml('uc-application/jmx-configmap.yaml') + app_deploy = load_yaml('uc-application/aggregation-deployment.yaml') print('Kubernetes yaml files loaded') - return wg, app_svc, app_svc_monitor, app_jmx, app_deploy + return wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy def replace_env_value(container, key, value): @@ -113,8 +114,9 @@ def replace_env_value(container, key, value): 'value'] = value -def start_workload_generator(wg_yaml, dim_value, uc_id): +def start_workload_generator(svc_yaml, wg_yaml, dim_value, uc_id): """Starts the workload generator. + :param wg_yaml: The yaml object for the workload generator service. :param wg_yaml: The yaml object for the workload generator. :param string dim_value: The dimension value the load generator should use. :param string uc_id: Use case id for which load should be generated. @@ -123,14 +125,25 @@ def start_workload_generator(wg_yaml, dim_value, uc_id): the yaml object. 
""" print('Start workload generator') + svc, wg_deploy = None, None + # Create Service + try: + svc = coreApi.create_namespaced_service( + namespace=namespace, body=svc_yaml) + print(f'Service {svc.metadata.name} created.') + except client.rest.ApiException as e: + svc = svc_yaml + logging.error("Service creation error: %s", e.reason) + + # Create Deployment num_sensors = dim_value wl_max_records = 150000 wl_instances = (num_sensors + wl_max_records - 1) // wl_max_records - # set parameters special for uc 2 - if uc_id == '2': - print('use uc2 stuff') + # set parameters special for uc 4 + if uc_id == '4': + print('use uc4 stuff') num_nested_groups = dim_value num_sensors = 4 approx_num_sensors = num_sensors ** num_nested_groups @@ -147,22 +160,22 @@ def start_workload_generator(wg_yaml, dim_value, uc_id): # Set environment variables replace_env_value(wg_containter['env'], 'NUM_SENSORS', str(num_sensors)) - replace_env_value(wg_containter['env'], 'INSTANCES', str(wl_instances)) - if uc_id == '2': # Special configuration for uc2 + if uc_id == '4': # Special configuration for UC4 replace_env_value( wg_containter['env'], 'NUM_NESTED_GROUPS', str(num_nested_groups)) try: - wg_ss = appsApi.create_namespaced_deployment( + wg_deploy = appsApi.create_namespaced_deployment( namespace=namespace, body=wg_yaml ) - print(f'Deployment {wg_ss.metadata.name} created.') - return wg_ss + print(f'Deployment {wg_deploy.metadata.name} created.') except client.rest.ApiException as e: print(f'Deployment creation error: {e.reason}') - return wg_yaml + wg_deploy = wg_yaml + + return svc, wg_deploy def start_application(svc_yaml, svc_monitor_yaml, jmx_yaml, deploy_yaml, @@ -317,19 +330,23 @@ def delete_resource(obj, del_func): print('Resource deleted') -def stop_applications(wg, app_svc, app_svc_monitor, app_jmx, app_deploy): +def stop_applications(wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy): """Stops the applied applications and delete resources. 
- :param wg: The workload generator statefull set. + :param wg: The load generator service. + :param wg: The load generator deployment. :param app_svc: The application service. :param app_svc_monitor: The application service monitor. :param app_jmx: The application jmx config map. :param app_deploy: The application deployment. """ - print('Stop use case application and workload generator') + print('Stop use case application and load generator') - print('Delete workload generator') + print('Delete load generator deployment') delete_resource(wg, appsApi.delete_namespaced_deployment) + print('Delete load generator service') + delete_resource(wg_svc, coreApi.delete_namespaced_service) + print('Delete app service') delete_resource(app_svc, coreApi.delete_namespaced_service) @@ -492,12 +509,12 @@ def stop_lag_exporter(): return -def reset_cluster(wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics): +def reset_cluster(wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics): """ Stop the applications, delete topics, reset zookeeper and stop lag exporter. 
""" print('Reset cluster') - stop_applications(wg, app_svc, app_svc_monitor, app_jmx, app_deploy) + stop_applications(wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy) print('---------------------') delete_topics(topics) print('---------------------') @@ -524,7 +541,7 @@ def main(exp_id, uc_id, dim_value, instances, partitions, cpu_limit, memory_limi """ global namespace namespace = ns - wg, app_svc, app_svc_monitor, app_jmx, app_deploy = load_yaml_files() + wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy = load_yaml_files() print('---------------------') initialize_kubernetes_api() @@ -538,24 +555,24 @@ def main(exp_id, uc_id, dim_value, instances, partitions, cpu_limit, memory_limi # Check for reset options if reset_only: # Only reset cluster an then end program - reset_cluster(wg, app_svc, app_svc_monitor, + reset_cluster(wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics) sys.exit() if reset: # Reset cluster before execution print('Reset only mode') - reset_cluster(wg, app_svc, app_svc_monitor, + reset_cluster(wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics) print('---------------------') # Register the reset operation so that is executed at the abort of program - atexit.register(reset_cluster, wg, app_svc, + atexit.register(reset_cluster, wg_svc, wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics) create_topics(topics) print('---------------------') - wg = start_workload_generator(wg, dim_value, uc_id) + wg_svc, wg = start_workload_generator(wg_svc, wg, dim_value, uc_id) print('---------------------') app_svc, app_svc_monitor, app_jmx, app_deploy = start_application( @@ -578,7 +595,7 @@ def main(exp_id, uc_id, dim_value, instances, partitions, cpu_limit, memory_limi print('---------------------') # Reset cluster regular, therefore abort exit not needed anymore - reset_cluster(wg, app_svc, app_svc_monitor, app_jmx, app_deploy, topics) + reset_cluster(wg_svc, wg, app_svc, app_svc_monitor, app_jmx, 
app_deploy, topics) atexit.unregister(reset_cluster) diff --git a/execution/run_uc1.sh b/execution/run_uc1.sh deleted file mode 100755 index 02c46d8832fc800c57453570b14a6bf02681326a..0000000000000000000000000000000000000000 --- a/execution/run_uc1.sh +++ /dev/null @@ -1,124 +0,0 @@ -#!/bin/bash - -EXP_ID=$1 -DIM_VALUE=$2 -INSTANCES=$3 -PARTITIONS=${4:-40} -CPU_LIMIT=${5:-1000m} -MEMORY_LIMIT=${6:-4Gi} -KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100} -EXECUTION_MINUTES=${8:-5} - -echo "EXP_ID: $EXP_ID" -echo "DIM_VALUE: $DIM_VALUE" -echo "INSTANCES: $INSTANCES" -echo "PARTITIONS: $PARTITIONS" -echo "CPU_LIMIT: $CPU_LIMIT" -echo "MEMORY_LIMIT: $MEMORY_LIMIT" -echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS" -echo "EXECUTION_MINUTES: $EXECUTION_MINUTES" - -# Create Topics -#PARTITIONS=40 -#kubectl run temp-kafka --rm --attach --restart=Never --image=solsson/kafka --command -- bash -c "./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" -PARTITIONS=$PARTITIONS -kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" - -# Start workload generator -NUM_SENSORS=$DIM_VALUE -WL_MAX_RECORDS=150000 -WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS)) - -cat <<EOF 
>uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: $WL_INSTANCES - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "$NUM_SENSORS" - - name: INSTANCES - value: "$WL_INSTANCES" -EOF -kubectl apply -k uc-workload-generator/overlay/uc1-workload-generator - -# Start application -REPLICAS=$INSTANCES -cat <<EOF >uc-application/overlay/uc1-application/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: $REPLICAS - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS" - resources: - limits: - memory: $MEMORY_LIMIT - cpu: $CPU_LIMIT -EOF -kubectl apply -k uc-application/overlay/uc1-application - -# Execute for certain time -sleep $(($EXECUTION_MINUTES * 60)) - -# Run eval script -source ../.venv/bin/activate -python lag_analysis.py $EXP_ID uc1 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES -deactivate - -# Stop workload generator and app -kubectl delete -k uc-workload-generator/overlay/uc1-workload-generator -kubectl delete -k uc-application/overlay/uc1-application - - -# Delete topics instead of Kafka -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -# kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic '.*' -#sleep 30s # TODO check -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' | wc -l -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper 
my-confluent-cp-zookeeper:2181 --list" - -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -echo "Finished execution, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' -while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0 -do - kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*' --if-exists" - echo "Wait for topic deletion" - sleep 5s - #echo "Finished waiting, print topics:" - #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - # Sometimes a second deletion seems to be required -done -echo "Finish topic deletion, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - -# delete zookeeper nodes used for workload generation -echo "Delete ZooKeeper configurations used for workload generation" -kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation" -echo "Waiting for deletion" -while kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 get /workload-generation" -do - echo "Wait for ZooKeeper state deletion." 
- sleep 5s -done -echo "Deletion finished" - -echo "Exiting script" - -KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}") -kubectl delete pod $KAFKA_LAG_EXPORTER_POD diff --git a/execution/run_uc2.sh b/execution/run_uc2.sh deleted file mode 100755 index 4544d3609ed807141455378b92ce3536ea2f92f6..0000000000000000000000000000000000000000 --- a/execution/run_uc2.sh +++ /dev/null @@ -1,129 +0,0 @@ -#!/bin/bash - -EXP_ID=$1 -DIM_VALUE=$2 -INSTANCES=$3 -PARTITIONS=${4:-40} -CPU_LIMIT=${5:-1000m} -MEMORY_LIMIT=${6:-4Gi} -KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100} -EXECUTION_MINUTES=${8:-5} - -echo "EXP_ID: $EXP_ID" -echo "DIM_VALUE: $DIM_VALUE" -echo "INSTANCES: $INSTANCES" -echo "PARTITIONS: $PARTITIONS" -echo "CPU_LIMIT: $CPU_LIMIT" -echo "MEMORY_LIMIT: $MEMORY_LIMIT" -echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS" -echo "EXECUTION_MINUTES: $EXECUTION_MINUTES" - -# Create Topics -#PARTITIONS=40 -#kubectl run temp-kafka --rm --attach --restart=Never --image=solsson/kafka --command -- bash -c "./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" -PARTITIONS=$PARTITIONS -kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic aggregation-feedback --partitions $PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper 
my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" - -# Start workload generator -NUM_NESTED_GROUPS=$DIM_VALUE -WL_MAX_RECORDS=150000 -APPROX_NUM_SENSORS=$((4**NUM_NESTED_GROUPS)) -WL_INSTANCES=$(((APPROX_NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS)) - -cat <<EOF >uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: $WL_INSTANCES - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "4" - - name: HIERARCHY - value: "full" - - name: NUM_NESTED_GROUPS - value: "$NUM_NESTED_GROUPS" - - name: INSTANCES - value: "$WL_INSTANCES" -EOF -kubectl apply -k uc-workload-generator/overlay/uc2-workload-generator - -# Start application -REPLICAS=$INSTANCES -cat <<EOF >uc-application/overlay/uc2-application/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: $REPLICAS - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS" - resources: - limits: - memory: $MEMORY_LIMIT - cpu: $CPU_LIMIT -EOF -kubectl apply -k uc-application/overlay/uc2-application - -# Execute for certain time -sleep $(($EXECUTION_MINUTES * 60)) - -# Run eval script -source ../.venv/bin/activate -python lag_analysis.py $EXP_ID uc2 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES -deactivate - -# Stop workload generator and app -kubectl delete -k uc-workload-generator/overlay/uc2-workload-generator -kubectl delete -k uc-application/overlay/uc2-application - - -# Delete topics instead of Kafka -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -# kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic '.*' -#sleep 30s # TODO check 
-#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' | wc -l -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" - -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -echo "Finished execution, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' -while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|aggregation-feedback|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0 -do - kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|aggregation-feedback|output|configuration|theodolite-.*' --if-exists" - echo "Wait for topic deletion" - sleep 5s - #echo "Finished waiting, print topics:" - #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - # Sometimes a second deletion seems to be required -done -echo "Finish topic deletion, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - -# delete zookeeper nodes used for workload generation -echo "Delete ZooKeeper configurations used for workload generation" -kubectl exec zookeeper-client -- bash -c 
"zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation" -echo "Waiting for deletion" -while kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 get /workload-generation" -do - echo "Wait for ZooKeeper state deletion." - sleep 5s -done -echo "Deletion finished" - -echo "Exiting script" - -KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}") -kubectl delete pod $KAFKA_LAG_EXPORTER_POD diff --git a/execution/run_uc3.sh b/execution/run_uc3.sh deleted file mode 100755 index 4f2323f937f19d01a73482dea6aeaf5e922a0a3f..0000000000000000000000000000000000000000 --- a/execution/run_uc3.sh +++ /dev/null @@ -1,125 +0,0 @@ -#!/bin/bash - -EXP_ID=$1 -DIM_VALUE=$2 -INSTANCES=$3 -PARTITIONS=${4:-40} -CPU_LIMIT=${5:-1000m} -MEMORY_LIMIT=${6:-4Gi} -KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100} -EXECUTION_MINUTES=${8:-5} - -echo "EXP_ID: $EXP_ID" -echo "DIM_VALUE: $DIM_VALUE" -echo "INSTANCES: $INSTANCES" -echo "PARTITIONS: $PARTITIONS" -echo "CPU_LIMIT: $CPU_LIMIT" -echo "MEMORY_LIMIT: $MEMORY_LIMIT" -echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS" -echo "EXECUTION_MINUTES: $EXECUTION_MINUTES" - -# Create Topics -#PARTITIONS=40 -#kubectl run temp-kafka --rm --attach --restart=Never --image=solsson/kafka --command -- bash -c "./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" -PARTITIONS=$PARTITIONS -kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; kafka-topics 
--zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" - -# Start workload generator -NUM_SENSORS=$DIM_VALUE -WL_MAX_RECORDS=150000 -WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS)) - -cat <<EOF >uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: $WL_INSTANCES - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "$NUM_SENSORS" - - name: INSTANCES - value: "$WL_INSTANCES" -EOF -kubectl apply -k uc-workload-generator/overlay/uc3-workload-generator - - -# Start application -REPLICAS=$INSTANCES -cat <<EOF >uc-application/overlay/uc3-application/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: $REPLICAS - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS" - resources: - limits: - memory: $MEMORY_LIMIT - cpu: $CPU_LIMIT -EOF -kubectl apply -k uc-application/overlay/uc3-application -kubectl scale deployment uc3-titan-ccp-aggregation --replicas=$REPLICAS - -# Execute for certain time -sleep $(($EXECUTION_MINUTES * 60)) - -# Run eval script -source ../.venv/bin/activate -python lag_analysis.py $EXP_ID uc3 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES -deactivate - -# Stop workload generator and app -kubectl delete -k uc-workload-generator/overlay/uc3-workload-generator -kubectl delete -k uc-application/overlay/uc3-application - -# Delete topics instead of Kafka -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -# kafka-topics --zookeeper 
my-confluent-cp-zookeeper:2181 --delete --topic '.*' -#sleep 30s # TODO check -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' | wc -l -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" - -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -echo "Finished execution, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' -while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0 -do - kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*' --if-exists" - echo "Wait for topic deletion" - sleep 5s - #echo "Finished waiting, print topics:" - #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - # Sometimes a second deletion seems to be required -done -echo "Finish topic deletion, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - -# delete zookeeper nodes used for workload generation -echo "Delete ZooKeeper configurations used for workload generation" 
-kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation" -echo "Waiting for deletion" -while kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 get /workload-generation" -do - echo "Wait for ZooKeeper state deletion." - sleep 5s -done -echo "Deletion finished" - -echo "Exiting script" - -KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}") -kubectl delete pod $KAFKA_LAG_EXPORTER_POD diff --git a/execution/run_uc4.sh b/execution/run_uc4.sh deleted file mode 100755 index 08a38498839ef3c50a39c1ccfbd26914993ffbd3..0000000000000000000000000000000000000000 --- a/execution/run_uc4.sh +++ /dev/null @@ -1,124 +0,0 @@ -#!/bin/bash - -EXP_ID=$1 -DIM_VALUE=$2 -INSTANCES=$3 -PARTITIONS=${4:-40} -CPU_LIMIT=${5:-1000m} -MEMORY_LIMIT=${6:-4Gi} -KAFKA_STREAMS_COMMIT_INTERVAL_MS=${7:-100} -EXECUTION_MINUTES=${8:-5} - -echo "EXP_ID: $EXP_ID" -echo "DIM_VALUE: $DIM_VALUE" -echo "INSTANCES: $INSTANCES" -echo "PARTITIONS: $PARTITIONS" -echo "CPU_LIMIT: $CPU_LIMIT" -echo "MEMORY_LIMIT: $MEMORY_LIMIT" -echo "KAFKA_STREAMS_COMMIT_INTERVAL_MS: $KAFKA_STREAMS_COMMIT_INTERVAL_MS" -echo "EXECUTION_MINUTES: $EXECUTION_MINUTES" - -# Create Topics -#PARTITIONS=40 -#kubectl run temp-kafka --rm --attach --restart=Never --image=solsson/kafka --command -- bash -c "./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions $PARTITIONS --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; ./bin/kafka-topics.sh --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" -PARTITIONS=$PARTITIONS -kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic input --partitions 
$PARTITIONS --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic configuration --partitions 1 --replication-factor 1; kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --create --topic output --partitions $PARTITIONS --replication-factor 1" - -# Start workload generator -NUM_SENSORS=$DIM_VALUE -WL_MAX_RECORDS=150000 -WL_INSTANCES=$(((NUM_SENSORS + (WL_MAX_RECORDS -1 ))/ WL_MAX_RECORDS)) - -cat <<EOF >uuc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: $WL_INSTANCES - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "$NUM_SENSORS" - - name: INSTANCES - value: "$WL_INSTANCES" -EOF -kubectl apply -k uc-workload-generator/overlay/uc4-workload-generator - -# Start application -REPLICAS=$INSTANCES -cat <<EOF >uc-application/overlay/uc4-application/set_paramters.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: $REPLICAS - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "$KAFKA_STREAMS_COMMIT_INTERVAL_MS" - resources: - limits: - memory: $MEMORY_LIMIT - cpu: $CPU_LIMIT -EOF -kubectl apply -k uc-application/overlay/uc4-application -kubectl scale deployment uc4-titan-ccp-aggregation --replicas=$REPLICAS - -# Execute for certain time -sleep $(($EXECUTION_MINUTES * 60)) - -# Run eval script -source ../.venv/bin/activate -python lag_analysis.py $EXP_ID uc4 $DIM_VALUE $INSTANCES $EXECUTION_MINUTES -deactivate - -# Stop workload generator and app -kubectl delete -k uc-workload-generator/overlay/uc4-workload-generator -kubectl delete -k uc-application/overlay/uc4-application - -# Delete topics instead of Kafka -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 
'input,output,configuration,titan-.*'" -# kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic '.*' -#sleep 30s # TODO check -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n '/^titan-.*/p;/^input$/p;/^output$/p;/^configuration$/p' | wc -l -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" - -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input,output,configuration,titan-.*'" -echo "Finished execution, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' -while test $(kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(theodolite-.*|input|output|configuration)( - marked for deletion)?$/p' | wc -l) -gt 0 -do - kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --delete --topic 'input|output|configuration|theodolite-.*' --if-exists" - echo "Wait for topic deletion" - sleep 5s - #echo "Finished waiting, print topics:" - #kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - # Sometimes a second deletion seems to be required -done -echo "Finish topic deletion, print topics:" -#kubectl exec kafka-client -- bash -c "kafka-topics --zookeeper my-confluent-cp-zookeeper:2181 --list" | sed -n -E '/^(titan-.*|input|output|configuration)( - marked for deletion)?$/p' - -# delete zookeeper nodes used for workload generation -echo 
"Delete ZooKeeper configurations used for workload generation" -kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 deleteall /workload-generation" -echo "Waiting for deletion" -while kubectl exec zookeeper-client -- bash -c "zookeeper-shell my-confluent-cp-zookeeper:2181 get /workload-generation" -do - echo "Wait for ZooKeeper state deletion." - sleep 5s -done -echo "Deletion finished" - -echo "Exiting script" - -KAFKA_LAG_EXPORTER_POD=$(kubectl get pod -l app.kubernetes.io/name=kafka-lag-exporter -o jsonpath="{.items[0].metadata.name}") -kubectl delete pod $KAFKA_LAG_EXPORTER_POD diff --git a/execution/uc-application/base/aggregation-deployment.yaml b/execution/uc-application/aggregation-deployment.yaml similarity index 100% rename from execution/uc-application/base/aggregation-deployment.yaml rename to execution/uc-application/aggregation-deployment.yaml diff --git a/execution/uc-application/base/aggregation-service.yaml b/execution/uc-application/aggregation-service.yaml similarity index 100% rename from execution/uc-application/base/aggregation-service.yaml rename to execution/uc-application/aggregation-service.yaml diff --git a/execution/uc-application/base/kustomization.yaml b/execution/uc-application/base/kustomization.yaml deleted file mode 100644 index 24c89cfdafb17cdc91f65198b9faf3665bfc6822..0000000000000000000000000000000000000000 --- a/execution/uc-application/base/kustomization.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -commonLabels: - app: titan-ccp-aggregation - -# Use all resources to compose them into one file -resources: - - aggregation-deployment.yaml - - aggregation-service.yaml - - service-monitor.yaml - - jmx-configmap.yaml diff --git a/execution/uc-application/base/jmx-configmap.yaml b/execution/uc-application/jmx-configmap.yaml similarity index 100% rename from execution/uc-application/base/jmx-configmap.yaml rename to 
execution/uc-application/jmx-configmap.yaml diff --git a/execution/uc-application/overlay/uc1-application/kustomization.yaml b/execution/uc-application/overlay/uc1-application/kustomization.yaml deleted file mode 100644 index 0d3820fe392e1d2224d78a8dd2415c4dce37c6e6..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc1-application/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc1- - -images: - - name: uc-app - newName: theodolite/theodolite-uc1-kstreams-app - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff --git a/execution/uc-application/overlay/uc1-application/set_paramters.yaml b/execution/uc-application/overlay/uc1-application/set_paramters.yaml deleted file mode 100644 index cb85048128774ab421b89338d5b1ce23791acac8..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc1-application/set_paramters.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: 1 - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "100" - resources: - limits: - memory: 4Gi - cpu: 1000m diff --git a/execution/uc-application/overlay/uc2-application/kustomization.yaml b/execution/uc-application/overlay/uc2-application/kustomization.yaml deleted file mode 100644 index cd32cabf70fdfa666a5703c97bc4e4fad7800ba7..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc2-application/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc2- - -images: - - name: uc-app - newName: theodolite/theodolite-uc2-kstreams-app - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource 
parameters diff --git a/execution/uc-application/overlay/uc2-application/set_paramters.yaml b/execution/uc-application/overlay/uc2-application/set_paramters.yaml deleted file mode 100644 index cb85048128774ab421b89338d5b1ce23791acac8..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc2-application/set_paramters.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: 1 - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "100" - resources: - limits: - memory: 4Gi - cpu: 1000m diff --git a/execution/uc-application/overlay/uc3-application/kustomization.yaml b/execution/uc-application/overlay/uc3-application/kustomization.yaml deleted file mode 100644 index 5722cbca8cc79247063921a55252435804edefe6..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc3-application/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc3- - -images: - - name: uc-app - newName: theodolite/theodolite-uc3-kstreams-app - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff --git a/execution/uc-application/overlay/uc3-application/set_paramters.yaml b/execution/uc-application/overlay/uc3-application/set_paramters.yaml deleted file mode 100644 index cb85048128774ab421b89338d5b1ce23791acac8..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc3-application/set_paramters.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: 1 - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "100" - resources: - limits: - memory: 4Gi - cpu: 1000m diff --git 
a/execution/uc-application/overlay/uc4-application/kustomization.yaml b/execution/uc-application/overlay/uc4-application/kustomization.yaml deleted file mode 100644 index b44a9bb643802735b740b74bdb47299fb413e5d3..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc4-application/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc4- - -images: - - name: uc-app - newName: theodolite/theodolite-uc4-kstreams-app - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff --git a/execution/uc-application/overlay/uc4-application/set_paramters.yaml b/execution/uc-application/overlay/uc4-application/set_paramters.yaml deleted file mode 100644 index cb85048128774ab421b89338d5b1ce23791acac8..0000000000000000000000000000000000000000 --- a/execution/uc-application/overlay/uc4-application/set_paramters.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-aggregation -spec: - replicas: 1 - template: - spec: - containers: - - name: uc-application - env: - - name: COMMIT_INTERVAL_MS - value: "100" - resources: - limits: - memory: 4Gi - cpu: 1000m diff --git a/execution/uc-application/base/service-monitor.yaml b/execution/uc-application/service-monitor.yaml similarity index 100% rename from execution/uc-application/base/service-monitor.yaml rename to execution/uc-application/service-monitor.yaml diff --git a/execution/uc-workload-generator/base/kustomization.yaml b/execution/uc-workload-generator/base/kustomization.yaml deleted file mode 100644 index 2a2c3de74db5afb7c70b440651b8c0c47720b755..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/base/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: - - workloadGenerator.yaml diff --git 
a/execution/uc-workload-generator/load-generator-service.yaml b/execution/uc-workload-generator/load-generator-service.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c1299e373009dee5fa4cc87093ebc684c7f2e333 --- /dev/null +++ b/execution/uc-workload-generator/load-generator-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: titan-ccp-load-generator + labels: + app: titan-ccp-load-generator +spec: + type: ClusterIP + clusterIP: None + selector: + app: titan-ccp-load-generator + ports: + - name: coordination + port: 5701 + targetPort: 5701 + protocol: TCP diff --git a/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml deleted file mode 100644 index 553b769a3bacd3356d6b5af5ba2e865acdd47a7c..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc1-workload-generator/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc1- - -images: - - name: workload-generator - newName: theodolite/theodolite-uc1-workload-generator - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff --git a/execution/uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml deleted file mode 100644 index b275607c27723b1e7e5e7e2b5c02942731bed809..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc1-workload-generator/set_paramters.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: 1 - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "25000" - - name: INSTANCES - value: "1" 
diff --git a/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml deleted file mode 100644 index ff68743355d55459f2df988e8dd42bf0b3b6ae64..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc2-workload-generator/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc2- - -images: - - name: workload-generator - newName: theodolite/theodolite-uc2-workload-generator - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff --git a/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml deleted file mode 100644 index 187cb4717195537288e58035dcdda5f34fc9ceed..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc2-workload-generator/set_paramters.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: 1 - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "4" - - name: HIERARCHY - value: "full" - - name: NUM_NESTED_GROUPS - value: "5" - - name: INSTANCES - value: "1" diff --git a/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml deleted file mode 100644 index a7022480fcfe401f3e4e4c3898c3d79930198d3e..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc3-workload-generator/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc3- - -images: - - name: workload-generator - 
newName: theodolite/theodolite-uc3-workload-generator - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff --git a/execution/uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml deleted file mode 100644 index b275607c27723b1e7e5e7e2b5c02942731bed809..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc3-workload-generator/set_paramters.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: 1 - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "25000" - - name: INSTANCES - value: "1" diff --git a/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml b/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml deleted file mode 100644 index 5efb0eb25a26371cdddfcc7969a2d10131dbb448..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc4-workload-generator/kustomization.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namePrefix: uc4- - -images: - - name: workload-generator - newName: theodolite/theodolite-uc4-workload-generator - newTag: latest - -bases: -- ../../base - -patchesStrategicMerge: -- set_paramters.yaml # Patch setting the resource parameters diff --git a/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml b/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml deleted file mode 100644 index b275607c27723b1e7e5e7e2b5c02942731bed809..0000000000000000000000000000000000000000 --- a/execution/uc-workload-generator/overlay/uc4-workload-generator/set_paramters.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: apps/v1 
-kind: Deployment -metadata: - name: titan-ccp-load-generator -spec: - replicas: 1 - template: - spec: - containers: - - name: workload-generator - env: - - name: NUM_SENSORS - value: "25000" - - name: INSTANCES - value: "1" diff --git a/execution/uc-workload-generator/base/workloadGenerator.yaml b/execution/uc-workload-generator/workloadGenerator.yaml similarity index 73% rename from execution/uc-workload-generator/base/workloadGenerator.yaml rename to execution/uc-workload-generator/workloadGenerator.yaml index 794468b18dc74ca09872577b5b3c115605bd4620..146e285f66d4c0e1a88d613e4ac2d5571234fad6 100644 --- a/execution/uc-workload-generator/base/workloadGenerator.yaml +++ b/execution/uc-workload-generator/workloadGenerator.yaml @@ -16,23 +16,22 @@ spec: containers: - name: workload-generator image: workload-generator:latest + ports: + - containerPort: 5701 + name: coordination env: # Order need to be preserved for run_uc.py - name: NUM_SENSORS value: "25000" - - name: INSTANCES - value: "1" - name: NUM_NESTED_GROUPS value: "5" - - name: ZK_HOST - value: "my-confluent-cp-zookeeper" - - name: ZK_PORT - value: "2181" + - name: KUBERNETES_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: KUBERNETES_DNS_NAME + value: "titan-ccp-load-generator.$(KUBERNETES_NAMESPACE).svc.cluster.local" - name: KAFKA_BOOTSTRAP_SERVERS value: "my-confluent-cp-kafka:9092" - name: SCHEMA_REGISTRY_URL value: "http://my-confluent-cp-schema-registry:8081" - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name diff --git a/theodolite-quarkus/build.gradle b/theodolite-quarkus/build.gradle index a63d3f9af4c45e45631a277bea38f8e9b5f17b7f..ba80ced4b1e4e58f34d5b316f4a46f4e032654a9 100644 --- a/theodolite-quarkus/build.gradle +++ b/theodolite-quarkus/build.gradle @@ -2,7 +2,8 @@ plugins { id 'org.jetbrains.kotlin.jvm' version "1.3.72" id "org.jetbrains.kotlin.plugin.allopen" version "1.3.72" id 'io.quarkus' - id "io.gitlab.arturbosch.detekt" version "1.15.0" + 
id "io.gitlab.arturbosch.detekt" version "1.15.0" //For code style + id "org.jlleitschuh.gradle.ktlint" version "10.0.0" // same as above } repositories { @@ -53,4 +54,8 @@ detekt { failFast = true // fail build on any finding buildUponDefaultConfig = true ignoreFailures = true +} + +ktlint { + ignoreFailures = true } \ No newline at end of file