Commit 25e76502 authored by Sören Henning

Merge branch 'master' into feature/172-abstract-flink-microservice

parents cab6297c d96b5c77
1 merge request: !272 Introduce Abstract Flink Service Class
Showing with 1076 additions and 165 deletions
@@ -48,7 +48,8 @@ default:
         export PUBLISHED_IMAGE_TAG=$DOCKER_TAG_NAME$CI_COMMIT_SHORT_SHA
       fi
     - "[ $DOCKERFILE ] && KANIKO_DOCKERFILE=\"--dockerfile $DOCKERFILE\""
-    - /kaniko/executor --context `pwd`/$CONTEXT $KANIKO_DOCKERFILE $KANIKO_D
+    - "BUILD_ARGS=$(printenv | sed -n 's/BUILD_ARG_/--build-arg=/p')"
+    - /kaniko/executor --context `pwd`/$CONTEXT $KANIKO_DOCKERFILE $KANIKO_D $BUILD_ARGS
     - echo "PUBLISHED_IMAGE_TAG=$PUBLISHED_IMAGE_TAG" >> $CI_PROJECT_DIR/build.env
   artifacts:
     reports:
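The new `BUILD_ARGS` line turns every `BUILD_ARG_`-prefixed CI variable into a `--build-arg=` flag that is appended to the Kaniko invocation. A rough Python rendering of what that `sed` expression does (the helper name and sample variables are illustrative only):

```python
def kaniko_build_args(env: dict) -> list:
    """Rough equivalent of: printenv | sed -n 's/BUILD_ARG_/--build-arg=/p'
    (the sed form would also match the prefix mid-string; this sketch only
    covers the intended leading-prefix case)."""
    return [
        "--build-arg=" + name[len("BUILD_ARG_"):] + "=" + value
        for name, value in env.items()
        if name.startswith("BUILD_ARG_")
    ]

# e.g. the deploy-buildimage-k3d-helm job below sets BUILD_ARG_KUBECTL
print(kaniko_build_args({"BUILD_ARG_KUBECTL": "v1.21.3", "CI": "true"}))
# ['--build-arg=KUBECTL=v1.21.3']
```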
@@ -135,6 +136,38 @@ lint-helm:
     - when: manual
       allow_failure: true
test-helm:
stage: smoketest
extends: .dind
needs:
- lint-helm
# - deploy-theodolite
image: ghcr.io/cau-se/theodolite-build-k3d-helm:20.10.12
variables:
CLUSTERNAME: "$CI_PROJECT_NAME-$CI_PIPELINE_ID"
cache:
paths:
- helm/charts
before_script:
- k3d help
- k3d version
#- k3d cluster create testgitlabci --agents 1 --wait -p "30000:30000@agent[0]"
- k3d cluster create $CLUSTERNAME --agents 1 --wait -p "30000:30000@agent:0" # k3d 5.0
# show cluster info
- kubectl cluster-info
- helm version
script:
# Display initial pods, etc.
- cd helm
- helm dependencies update .
- helm install theodolite . -f preconfigs/minimal.yaml --wait
- helm test theodolite
- kubectl get nodes -o wide
- kubectl get pods --all-namespaces -o wide
- kubectl get services --all-namespaces -o wide
after_script:
- k3d cluster delete $CLUSTERNAME
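The new `test-helm` job spins up a throwaway k3d cluster, installs the chart with the minimal preconfig, runs the Helm tests, and always deletes the cluster afterwards. A sketch of replaying the same flow locally (assumes k3d, kubectl, and Helm are installed and the script runs from the repository root; the cluster name is arbitrary):

```python
import subprocess

def run(cmd: str) -> None:
    print("+", cmd)
    subprocess.run(cmd, shell=True, check=True)

# mirrors the CI job: create cluster, install chart, test, always clean up
run("k3d cluster create theodolite-smoketest --agents 1 --wait "
    "-p '30000:30000@agent:0'")
try:
    run("helm dependencies update helm/")
    run("helm install theodolite helm/ -f helm/preconfigs/minimal.yaml --wait")
    run("helm test theodolite")
finally:
    run("k3d cluster delete theodolite-smoketest")
```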
# Theodolite Benchmarks
@@ -235,7 +268,7 @@ spotbugs-benchmarks:
     - changes:
         - theodolite-benchmarks/*
         - theodolite-benchmarks/$JAVA_PROJECT_NAME/**/*
-        - theodolite-benchmarks/{$JAVA_PROJECT_DEPS}/**/*
+        - theodolite-benchmarks/{commons,$JAVA_PROJECT_DEPS}/**/*
       if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME && $JAVA_PROJECT_DEPS"
     - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $IMAGE_NAME && $JAVA_PROJECT_NAME"
       when: manual
@@ -851,3 +884,24 @@ deploy-buildimage-docker-compose-jq:
     - if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $CI_PIPELINE_SOURCE == 'web'"
       when: manual
       allow_failure: true
deploy-buildimage-k3d-helm:
stage: deploy
extends:
- .kaniko-push
needs: []
variables:
DOCKER_VERSION: 20.10.12
BUILD_ARG_DOCKER_VERSION: $DOCKER_VERSION
BUILD_ARG_KUBECTL: v1.21.3
IMAGE_NAME: theodolite-build-k3d-helm
IMAGE_TAG: $DOCKER_VERSION
before_script:
- cd buildimages/k3d-helm
rules:
- changes:
- buildimages/k3d-helm/Dockerfile
if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW"
- if: "$CR_HOST && $CR_ORG && $CR_USER && $CR_PW && $CI_PIPELINE_SOURCE == 'web'"
when: manual
allow_failure: true
ARG DOCKER_VERSION=latest
FROM docker:${DOCKER_VERSION}
ARG KUBECTL_VERSION=v1.21.3
RUN apk add -U wget bash openssl
# install kubectl
RUN wget -q -O /usr/local/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl && \
chmod +x /usr/local/bin/kubectl
# install k3d
RUN wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash
# install Helm
RUN wget -q -O - https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
@@ -239,7 +239,7 @@ GEM
       jekyll-seo-tag (~> 2.1)
     minitest (5.15.0)
     multipart-post (2.1.1)
-    nokogiri (1.13.4-x86_64-linux)
+    nokogiri (1.13.6-x86_64-linux)
       racc (~> 1.4)
     octokit (4.22.0)
       faraday (>= 0.9)
...
(diff collapsed)
apiVersion: v1
entries:
  theodolite:
- apiVersion: v2
appVersion: 0.7.0
created: "2022-05-11T13:49:02.457130925+02:00"
dependencies:
- condition: grafana.enabled
name: grafana
repository: https://grafana.github.io/helm-charts
version: 6.17.5
- condition: kube-prometheus-stack.enabled
name: kube-prometheus-stack
repository: https://prometheus-community.github.io/helm-charts
version: 20.0.1
- condition: cp-helm-charts.enabled
name: cp-helm-charts
repository: https://soerenhenning.github.io/cp-helm-charts
version: 0.6.0
- condition: strimzi.enabled
name: strimzi-kafka-operator
repository: https://strimzi.io/charts/
version: 0.28.0
description: Theodolite is a framework for benchmarking the horizontal and vertical
scalability of cloud-native applications.
digest: af10134baa30bb07423f78240fe1c609381e1c616585883cf5d3aded2d86a2b1
home: https://www.theodolite.rocks
maintainers:
- email: soeren.henning@email.uni-kiel.de
name: Sören Henning
url: https://www.se.informatik.uni-kiel.de/en/team/soeren-henning-m-sc
name: theodolite
sources:
- https://github.com/cau-se/theodolite
type: application
urls:
- https://github.com/cau-se/theodolite/releases/download/v0.7.0/theodolite-0.7.0%20copy.tgz
version: 0.7.0
- apiVersion: v2
  appVersion: 0.7.0
  created: "2022-05-11T13:49:02.491041789+02:00"
...
@@ -102,4 +102,16 @@ kubectl delete crd prometheuses.monitoring.coreos.com
kubectl delete crd prometheusrules.monitoring.coreos.com
kubectl delete crd servicemonitors.monitoring.coreos.com
kubectl delete crd thanosrulers.monitoring.coreos.com
# CRDs for Strimzi
kubectl delete crd kafkabridges.kafka.strimzi.io
kubectl delete crd kafkaconnectors.kafka.strimzi.io
kubectl delete crd kafkaconnects.kafka.strimzi.io
kubectl delete crd kafkamirrormaker2s.kafka.strimzi.io
kubectl delete crd kafkamirrormakers.kafka.strimzi.io
kubectl delete crd kafkarebalances.kafka.strimzi.io
kubectl delete crd kafkas.kafka.strimzi.io
kubectl delete crd kafkatopics.kafka.strimzi.io
kubectl delete crd kafkausers.kafka.strimzi.io
kubectl delete crd strimzipodsets.core.strimzi.io
```
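Since the list above has to track every CRD Strimzi ships, an alternative cleanup is to discover the CRDs by their API group instead. A sketch (assumes kubectl is configured with sufficient permissions to delete CRDs):

```python
import subprocess

# Delete every CRD in a *.strimzi.io API group, equivalent to the
# explicit kubectl delete crd commands listed above.
names = subprocess.run(
    ["kubectl", "get", "crd", "-o", "name"],
    capture_output=True, text=True, check=True,
).stdout.split()
for name in names:
    # names look like customresourcedefinition.../kafkas.kafka.strimzi.io
    if name.endswith("strimzi.io"):
        subprocess.run(["kubectl", "delete", name], check=True)
```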
{{- if not (index .Values "strimzi-kafka-operator" "createGlobalResources") -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: strimzi
name: strimzi-cluster-operator-entity-operator-delegation-namespaced
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: strimzi-entity-operator-namespaced
subjects:
- kind: ServiceAccount
name: strimzi-cluster-operator
namespace: {{ .Release.Namespace }}
{{- end }}
{{- if not (index .Values "strimzi-kafka-operator" "createGlobalResources") -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: strimzi-entity-operator-namespaced
labels:
app: {{ template "theodolite.name" . }}-strimzi
rules:
- apiGroups:
- "kafka.strimzi.io"
resources:
# The entity operator runs the KafkaTopic assembly operator, which needs to access and manage KafkaTopic resources
- kafkatopics
- kafkatopics/status
# The entity operator runs the KafkaUser assembly operator, which needs to access and manage KafkaUser resources
- kafkausers
- kafkausers/status
verbs:
- get
- list
- watch
- create
- patch
- update
- delete
- apiGroups:
- ""
resources:
- events
verbs:
# The entity operator needs to be able to create events
- create
- apiGroups:
- ""
resources:
# The entity operator user-operator needs to access and manage secrets to store generated credentials
- secrets
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
{{- end }}
{{- if not (index .Values "strimzi-kafka-operator" "createGlobalResources") -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app: strimzi
name: strimzi-cluster-operator-namespaced
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: strimzi-cluster-operator-namespaced
subjects:
- kind: ServiceAccount
name: strimzi-cluster-operator
namespace: {{ .Release.Namespace }}
{{- end }}
{{- if not (index .Values "strimzi-kafka-operator" "createGlobalResources") -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: strimzi-cluster-operator-namespaced
labels:
app: strimzi-cluster-operator-namespaced
rules:
- apiGroups:
- "rbac.authorization.k8s.io"
resources:
# The cluster operator needs to access and manage rolebindings to grant Strimzi components cluster permissions
- rolebindings
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
- "rbac.authorization.k8s.io"
resources:
# The cluster operator needs to access and manage roles to grant the entity operator permissions
- roles
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
- ""
resources:
# The cluster operator needs to access and delete pods, this is to allow it to monitor pod health and coordinate rolling updates
- pods
# The cluster operator needs to access and manage service accounts to grant Strimzi components cluster permissions
- serviceaccounts
# The cluster operator needs to access and manage config maps for Strimzi components configuration
- configmaps
# The cluster operator needs to access and manage services and endpoints to expose Strimzi components to network traffic
- services
- endpoints
# The cluster operator needs to access and manage secrets to handle credentials
- secrets
# The cluster operator needs to access and manage persistent volume claims to bind them to Strimzi components for persistent data
- persistentvolumeclaims
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
- "kafka.strimzi.io"
resources:
# The cluster operator runs the KafkaAssemblyOperator, which needs to access and manage Kafka resources
- kafkas
- kafkas/status
# The cluster operator runs the KafkaConnectAssemblyOperator, which needs to access and manage KafkaConnect resources
- kafkaconnects
- kafkaconnects/status
# The cluster operator runs the KafkaConnectorAssemblyOperator, which needs to access and manage KafkaConnector resources
- kafkaconnectors
- kafkaconnectors/status
# The cluster operator runs the KafkaMirrorMakerAssemblyOperator, which needs to access and manage KafkaMirrorMaker resources
- kafkamirrormakers
- kafkamirrormakers/status
# The cluster operator runs the KafkaBridgeAssemblyOperator, which needs to access and manage KafkaBridge resources
- kafkabridges
- kafkabridges/status
# The cluster operator runs the KafkaMirrorMaker2AssemblyOperator, which needs to access and manage KafkaMirrorMaker2 resources
- kafkamirrormaker2s
- kafkamirrormaker2s/status
# The cluster operator runs the KafkaRebalanceAssemblyOperator, which needs to access and manage KafkaRebalance resources
- kafkarebalances
- kafkarebalances/status
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
- "core.strimzi.io"
resources:
# The cluster operator uses StrimziPodSets to manage the Kafka and ZooKeeper pods
- strimzipodsets
- strimzipodsets/status
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
# The cluster operator needs the extensions api as the operator supports Kubernetes version 1.11+
# apps/v1 was introduced in Kubernetes 1.14
- "extensions"
resources:
# The cluster operator needs to access and manage deployments to run deployment based Strimzi components
- deployments
- deployments/scale
# The cluster operator needs to access replica sets to manage Strimzi components and to determine error states
- replicasets
# The cluster operator needs to access and manage replication controllers to manage replicasets
- replicationcontrollers
# The cluster operator needs to access and manage network policies to lock down communication between Strimzi components
- networkpolicies
# The cluster operator needs to access and manage ingresses which allow external access to the services in a cluster
- ingresses
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
- "apps"
resources:
# The cluster operator needs to access and manage deployments to run deployment based Strimzi components
- deployments
- deployments/scale
- deployments/status
# The cluster operator needs to access and manage stateful sets to run stateful sets based Strimzi components
- statefulsets
# The cluster operator needs to access replica-sets to manage Strimzi components and to determine error states
- replicasets
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
- ""
resources:
# The cluster operator needs to be able to create events and delegate permissions to do so
- events
verbs:
- create
- apiGroups:
# Kafka Connect Build on OpenShift requirement
- build.openshift.io
resources:
- buildconfigs
- buildconfigs/instantiate
- builds
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
- networking.k8s.io
resources:
# The cluster operator needs to access and manage network policies to lock down communication between Strimzi components
- networkpolicies
# The cluster operator needs to access and manage ingresses which allow external access to the services in a cluster
- ingresses
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
- route.openshift.io
resources:
# The cluster operator needs to access and manage routes to expose Strimzi components for external access
- routes
- routes/custom-host
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
- policy
resources:
# The cluster operator needs to access and manage pod disruption budgets; this limits the number of concurrent disruptions
# that a Strimzi component experiences, allowing for higher availability
- poddisruptionbudgets
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
{{- end }}
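These namespaced Roles replace Strimzi's cluster-wide RBAC when `createGlobalResources` is disabled. One way to spot-check that the cluster operator's service account actually received the commented permissions is `kubectl auth can-i` with impersonation; a sketch (assumes the chart is installed in the `default` namespace):

```python
import subprocess

# Impersonate the cluster operator's service account and probe a few of
# the verbs/resources granted by the Role above.
SA = "system:serviceaccount:default:strimzi-cluster-operator"
checks = [
    ("create", "strimzipodsets.core.strimzi.io"),
    ("delete", "pods"),
    ("update", "kafkas.kafka.strimzi.io"),
]
for verb, resource in checks:
    result = subprocess.run(
        ["kubectl", "auth", "can-i", verb, resource, "--as", SA, "-n", "default"],
        capture_output=True, text=True,  # exit code 1 just means "no"
    )
    print(verb, resource, "->", result.stdout.strip())
```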
+{{- if .Values.prometheus.enabled }}
 apiVersion: v1
 kind: Pod
 metadata:
@@ -5,7 +6,8 @@ metadata:
   labels:
     {{- include "theodolite.labels" . | nindent 4 }}
   annotations:
-    "helm.sh/hook": test-success
+    "helm.sh/hook": test
+    "helm.sh/hook-delete-policy": hook-succeeded
 spec:
   containers:
     - name: wget
@@ -13,3 +15,4 @@ spec:
       command: ['wget']
       args: ['http://prometheus-operated:9090']
       restartPolicy: Never
+{{- end }}
@@ -159,7 +159,7 @@ cp-helm-charts:
   pollIntervalSeconds: 15
 strimzi-kafka-operator:
-  createGlobalResources: true
+  createGlobalResources: false # Might disable some of Strimzi's features
 strimzi:
   enabled: true
...
@@ -37,7 +37,7 @@ def aggr_query(values: dict, warmup: int, aggr_func):
     df = pd.DataFrame.from_dict(values)
     df.columns = ['timestamp', 'value']
     filtered = df[df['timestamp'] >= (df['timestamp'][0] + warmup)]
-    filtered['value'] = filtered['value'].astype(int)
+    filtered['value'] = filtered['value'].astype(float).astype(int)
     return filtered['value'].aggregate(aggr_func)

 def check_result(result, operator: str, threshold):
...
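Prometheus returns sample values as strings, and a value with a fractional part such as `"3970.0000000000005"` (see the fixture change in the next hunk) is not a valid integer literal, so a direct `astype(int)` raises a `ValueError`; casting through `float` first avoids this. A minimal illustration:

```python
import pandas as pd

series = pd.Series(["1854284", "3970.0000000000005"])

# series.astype(int) raises ValueError on the second element:
#   invalid literal for int() with base 10
print(series.astype(float).astype(int))
# 0    1854284
# 1       3970
```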
@@ -260,7 +260,7 @@
       ],
       [
         1.634624989695E9,
-        "1854284"
+        "3970.0000000000005"
       ]
     ]
   }
...
@@ -13,8 +13,7 @@ repositories {
 }
 dependencies {
-  implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
-  implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
+  implementation project(':commons')
   implementation group: 'org.apache.beam', name: 'beam-sdks-java-core', version: '2.35.0'
   implementation('org.apache.beam:beam-sdks-java-io-kafka:2.35.0'){
...
@@ -10,7 +10,7 @@ import org.apache.beam.sdk.options.PipelineOptionsFactory;
 import org.apache.commons.configuration2.Configuration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import titan.ccp.common.configuration.ServiceConfigurations;
+import rocks.theodolite.benchmarks.commons.commons.configuration.ServiceConfigurations;
 /**
  * A general Apache Beam-based microservice. It is configured by Beam pipeline, a Beam runner and
...
@@ -2,7 +2,8 @@ package rocks.theodolite.benchmarks.commons.beam.kafka;
 import io.confluent.kafka.streams.serdes.avro.SpecificAvroDeserializer;
 import org.apache.kafka.common.serialization.Deserializer;
-import titan.ccp.model.records.ActivePowerRecord;
+import rocks.theodolite.benchmarks.commons.model.records.ActivePowerRecord;
 /**
  * A Kafka {@link Deserializer} for typed Schema Registry {@link ActivePowerRecord}.
...
@@ -5,7 +5,7 @@ import org.apache.beam.sdk.io.kafka.KafkaRecord;
 import org.apache.beam.sdk.io.kafka.TimestampPolicy;
 import org.apache.beam.sdk.transforms.windowing.BoundedWindow;
 import org.joda.time.Instant;
-import titan.ccp.model.records.ActivePowerRecord;
+import rocks.theodolite.benchmarks.commons.model.records.ActivePowerRecord;
 /**
  * TimeStampPolicy to use event time based on the timestamp of the record value.
...
@@ -8,7 +8,7 @@ import org.apache.beam.sdk.values.KV;
 import org.apache.beam.sdk.values.PBegin;
 import org.apache.beam.sdk.values.PCollection;
 import org.apache.kafka.common.serialization.StringDeserializer;
-import titan.ccp.model.records.ActivePowerRecord;
+import rocks.theodolite.benchmarks.commons.model.records.ActivePowerRecord;
 /**
  * Simple {@link PTransform} that reads from Kafka using {@link KafkaIO} with event time.
...
@@ -22,8 +22,7 @@ def apacheBeamVersion = '2.35.0'
 dependencies {
   // These dependencies are used internally, and not exposed to consumers on their own compile classpath.
-  implementation('org.industrial-devops:titan-ccp-common:0.1.0-SNAPSHOT') { changing = true }
-  implementation('org.industrial-devops:titan-ccp-common-kafka:0.1.0-SNAPSHOT') { changing = true }
+  implementation project(':commons')
   implementation 'com.google.guava:guava:24.1-jre'
   implementation 'org.slf4j:slf4j-simple:1.7.25'
   implementation project(':beam-commons')
...
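The Java hunks above all follow the same import migration: `titan.ccp.model.records.*` moves to `rocks.theodolite.benchmarks.commons.model.records.*` and `titan.ccp.common.*` to `rocks.theodolite.benchmarks.commons.commons.*`. A hypothetical bulk-rewrite script applying exactly these two mappings (the directory path and the assumption that no other `titan.ccp` packages are involved are mine, not the commit's):

```python
import pathlib
import re

# Mappings taken from the import changes visible in this diff.
REWRITES = [
    (re.compile(r"\btitan\.ccp\.model\.records\."),
     "rocks.theodolite.benchmarks.commons.model.records."),
    (re.compile(r"\btitan\.ccp\.common\."),
     "rocks.theodolite.benchmarks.commons.commons."),
]

for path in pathlib.Path("theodolite-benchmarks").rglob("*.java"):
    text = path.read_text()
    for pattern, replacement in REWRITES:
        text = pattern.sub(replacement, text)
    path.write_text(text)
```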