Skip to content
Snippets Groups Projects
Commit 88403a62 authored by Sören Henning's avatar Sören Henning
Browse files

Merge branch 'feature/281-Add-Beam-Kubernetes-Benchmark-Definitions' into 'master'

Add Beam Kubernetes Benchmark Definitions

See merge request !218
parents bd7c1545 d159a9ea
No related branches found
No related tags found
1 merge request: !218 "Add Beam Kubernetes Benchmark Definitions"
Pipeline #6843 passed
Showing
with 546 additions and 7 deletions
......@@ -2,10 +2,12 @@ apiVersion: v1
kind: Pod
metadata:
name: zookeeper-client
labels:
app: zookeeper-client
spec:
containers:
- name: zookeeper-client
image: confluentinc/cp-zookeeper:5.4.0
image: zookeeper:3.7.0
command:
- sh
- -c
......
{{- if .Values.strimzi.zookeeper.zooEntrance.zookeeperClient.enabled -}}
# Idle helper pod shipping the ZooKeeper CLI tools; operators can
# `kubectl exec` into it and reach ZooKeeper through the zoo-entrance proxy.
apiVersion: v1
kind: Pod
metadata:
  name: {{ template "theodolite.fullname" . }}-kafka-zookeeper-client
  labels:
    app: zookeeper-client
spec:
  containers:
    - name: zookeeper-client
      image: zookeeper:3.7.0
      command:
        - sh
        - -c
        # Keep the container alive without doing any work.
        - "exec tail -f /dev/null"
      env:
        - name: ZOOKEEPER_SERVER
          # Address of the zoo-entrance Service (plain port 2181) defined
          # elsewhere in this chart.
          value: {{ template "theodolite.fullname" . }}-kafka-zoo-entrance:2181
  {{- with .Values.strimzi.zookeeper.zooEntrance.zookeeperClient.nodeSelector }}
  nodeSelector:
    {{- toYaml . | nindent 8 }}
  {{- end }}
{{- end }}
\ No newline at end of file
{{- if .Values.strimzi.zookeeper.zooEntrance.enabled -}}
# zoo-entrance: an stunnel-based proxy (ghcr.io/scholzj/zoo-entrance) that
# exposes Strimzi's TLS-secured ZooKeeper on a plain, in-cluster port 2181.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "theodolite.fullname" . }}-kafka-zoo-entrance
  labels:
    app: zoo-entrance
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zoo-entrance
  strategy:
    # Recreate: terminate the old proxy pod before starting a replacement.
    type: Recreate
  template:
    metadata:
      labels:
        app: zoo-entrance
    spec:
      containers:
        - name: zoo-entrance
          image: 'ghcr.io/scholzj/zoo-entrance:latest'
          command:
            - /opt/stunnel/stunnel_run.sh
          ports:
            - containerPort: 2181
              name: zoo
              protocol: TCP
          env:
            - name: LOG_LEVEL
              value: notice
            - name: STRIMZI_ZOOKEEPER_CONNECT
              # Renders as "<fullname>-kafka-zookeeper-client:2181" — presumably
              # Strimzi's zookeeper-client Service for the "<fullname>-kafka"
              # cluster (same name as the helper pod above); verify intent.
              value: {{ template "theodolite.fullname" . }}-kafka-zookeeper-client:2181
          imagePullPolicy: Always
          livenessProbe:
            exec:
              command:
                - /opt/stunnel/stunnel_healthcheck.sh
                - '2181'
            failureThreshold: 3
            initialDelaySeconds: 15
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - /opt/stunnel/stunnel_healthcheck.sh
                - '2181'
            failureThreshold: 3
            initialDelaySeconds: 15
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          volumeMounts:
            - mountPath: /etc/cluster-operator-certs/
              name: cluster-operator-certs
            - mountPath: /etc/cluster-ca-certs/
              name: cluster-ca-certs
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      volumes:
        # Strimzi-generated certificate secrets; defaultMode 288 == 0o440
        # (read-only for owner and group).
        - name: cluster-operator-certs
          secret:
            defaultMode: 288
            secretName: {{ template "theodolite.fullname" . }}-kafka-cluster-operator-certs
        - name: cluster-ca-certs
          secret:
            defaultMode: 288
            secretName: {{ template "theodolite.fullname" . }}-kafka-cluster-ca-cert
{{- end }}
{{- if .Values.strimzi.zookeeper.zooEntrance.enabled -}}
# Grants ingress on TCP 2181 to the Strimzi ZooKeeper pods
# (strimzi.io/name=<fullname>-kafka-zookeeper) from pods labeled
# app=zoo-entrance, i.e. from the proxy Deployment defined in this chart.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  labels:
    app: zoo-entrance
  name: {{ template "theodolite.fullname" . }}-kafka-zoo-entrance
spec:
  ingress:
    - from:
        - podSelector:
            matchLabels:
              app: zoo-entrance
      ports:
        - port: 2181
          protocol: TCP
  podSelector:
    matchLabels:
      strimzi.io/name: {{ template "theodolite.fullname" . }}-kafka-zookeeper
  policyTypes:
    - Ingress
{{- end }}
{{- if .Values.strimzi.zookeeper.zooEntrance.enabled -}}
# In-cluster entry point for the zoo-entrance proxy: routes port 2181
# to the pods labeled app=zoo-entrance.
apiVersion: v1
kind: Service
metadata:
  name: {{ template "theodolite.fullname" . }}-kafka-zoo-entrance
  labels:
    app: zoo-entrance
spec:
  type: ClusterIP
  selector:
    app: zoo-entrance
  ports:
    - name: zoo
      protocol: TCP
      port: 2181
      targetPort: 2181
{{- end }}
......@@ -176,6 +176,11 @@ strimzi:
zookeeper:
replicas: 3
zooEntrance:
enabled: true
zookeeperClient:
enabled: true
nodeSelector: {}
###
......@@ -295,24 +300,40 @@ operator:
uc1LoadGenerator: true
uc1Kstreams: true
uc1Flink: true
uc1BeamFlink: true
uc1BeamSamza: true
uc2LoadGenerator: true
uc2Kstreams: true
uc2Flink: true
uc2BeamFlink: true
uc2BeamSamza: true
uc3LoadGenerator: true
uc3Kstreams: true
uc3Flink: true
uc3BeamFlink: true
uc3BeamSamza: true
uc4LoadGenerator: true
uc4Kstreams: true
uc4Flink: true
uc4BeamFlink: true
uc4BeamSamza: true
benchmarks:
uc1Kstreams: true
uc1Flink: true
uc1BeamFlink: true
uc1BeamSamza: true
uc2Kstreams: true
uc2Flink: true
uc2BeamFlink: true
uc2BeamSamza: true
uc3Kstreams: true
uc3Flink: true
uc3BeamFlink: true
uc3BeamSamza: true
uc4Kstreams: true
uc4Flink: true
uc4BeamFlink: true
uc4BeamSamza: true
serviceAccount:
create: true
......
......@@ -10,6 +10,18 @@ kubectl create configmap benchmark-resources-uc2-kstreams --from-file uc2-kstrea
kubectl create configmap benchmark-resources-uc3-kstreams --from-file uc3-kstreams/resources
kubectl create configmap benchmark-resources-uc4-kstreams --from-file uc4-kstreams/resources
# Beam Flink
kubectl create configmap benchmark-resources-uc1-beam-flink --from-file uc1-beam-flink/resources
kubectl create configmap benchmark-resources-uc2-beam-flink --from-file uc2-beam-flink/resources
kubectl create configmap benchmark-resources-uc3-beam-flink --from-file uc3-beam-flink/resources
kubectl create configmap benchmark-resources-uc4-beam-flink --from-file uc4-beam-flink/resources
# Beam Samza
kubectl create configmap benchmark-resources-uc1-beam-samza --from-file uc1-beam-samza/resources
kubectl create configmap benchmark-resources-uc2-beam-samza --from-file uc2-beam-samza/resources
kubectl create configmap benchmark-resources-uc3-beam-samza --from-file uc3-beam-samza/resources
kubectl create configmap benchmark-resources-uc4-beam-samza --from-file uc4-beam-samza/resources
# Load Generator
kubectl create configmap benchmark-resources-uc1-load-generator --from-file uc1-load-generator/resources
kubectl create configmap benchmark-resources-uc2-load-generator --from-file uc2-load-generator/resources
......
# Flink configuration intended for mounting into jobmanager/taskmanager pods
# (the mounts are currently commented out in the Deployments): flink-conf.yaml
# plus a Log4j 2 setup with console and rolling-file appenders.
apiVersion: v1
kind: ConfigMap
metadata:
  name: flink-config
  labels:
    app: flink
data:
  flink-conf.yaml: |+
    jobmanager.rpc.address: flink-jobmanager
    taskmanager.numberOfTaskSlots: 1 #TODO
    #blob.server.port: 6124
    #jobmanager.rpc.port: 6123
    #taskmanager.rpc.port: 6122
    #queryable-state.proxy.ports: 6125
    #jobmanager.memory.process.size: 4Gb
    #taskmanager.memory.process.size: 4Gb
    #parallelism.default: 1 #TODO
    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
    metrics.reporter.prom.interval: 10 SECONDS
    taskmanager.network.detailed-metrics: true
    # -> gives metrics about inbound/outbound network queue lengths
  log4j-console.properties: |+
    # This affects logging for both user code and Flink
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = ConsoleAppender
    rootLogger.appenderRef.rolling.ref = RollingFileAppender
    # Uncomment this if you want to _only_ change Flink's logging
    #logger.flink.name = org.apache.flink
    #logger.flink.level = INFO
    # The following lines keep the log level of common libraries/connectors on
    # log level INFO. The root logger does not override this. You have to manually
    # change the log levels here.
    logger.akka.name = akka
    logger.akka.level = INFO
    logger.kafka.name= org.apache.kafka
    logger.kafka.level = INFO
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = INFO
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = INFO
    # Log all infos to the console
    appender.console.name = ConsoleAppender
    appender.console.type = CONSOLE
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    # Log all infos in the given rolling file
    appender.rolling.name = RollingFileAppender
    appender.rolling.type = RollingFile
    appender.rolling.append = false
    appender.rolling.fileName = ${sys:log.file}
    appender.rolling.filePattern = ${sys:log.file}.%i
    appender.rolling.layout.type = PatternLayout
    appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    appender.rolling.policies.type = Policies
    appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
    appender.rolling.policies.size.size=100MB
    appender.rolling.strategy.type = DefaultRolloverStrategy
    appender.rolling.strategy.max = 10
    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
    logger.netty.level = OFF
\ No newline at end of file
# Flink JobManager for the UC1 Beam pipeline, started in standalone-job
# (application) mode: the image presumably contains the job jar and the
# given main class is submitted at container start.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flink-jobmanager
spec:
  replicas: 1
  selector:
    matchLabels:
      app: flink
      component: jobmanager
  template:
    metadata:
      labels:
        app: flink
        component: jobmanager
    spec:
      terminationGracePeriodSeconds: 0
      containers:
        - name: jobmanager
          image: ghcr.io/cau-se/theodolite-uc1-beam-flink:latest
          # Parallelism is injected via the PARALLELISM env var declared below.
          args: ["standalone-job", "--job-classname", "application.Uc1BeamFlink",
                 "--parallelism=$(PARALLELISM)",
                 "--disableMetrics=true",
                 "--fasterCopy"]
          # optional arguments: ["--job-id", "<job id>", "--fromSavepoint", "/path/to/savepoint", "--allowNonRestoredState"]
          env:
            - name: KAFKA_BOOTSTRAP_SERVERS
              value: "theodolite-kafka-kafka-bootstrap:9092"
            - name: SCHEMA_REGISTRY_URL
              value: "http://theodolite-cp-schema-registry:8081"
            - name: COMMIT_INTERVAL_MS
              value: "100"
            - name: CHECKPOINTING
              value: "false"
            - name: PARALLELISM
              value: "1"
            - name: "FLINK_STATE_BACKEND"
              value: "rocksdb"
            - name: JOB_MANAGER_RPC_ADDRESS
              value: "flink-jobmanager"
            # Inline Flink settings; presumably consumed by the image's
            # entrypoint (standard for the official Flink images) — verify.
            - name: FLINK_PROPERTIES
              value: |+
                blob.server.port: 6124
                jobmanager.rpc.port: 6123
                taskmanager.rpc.port: 6122
                queryable-state.proxy.ports: 6125
                jobmanager.memory.process.size: 4Gb
                taskmanager.memory.process.size: 4Gb
                parallelism.default: 1 #TODO
          resources:
            limits:
              memory: 4Gi
              cpu: 1000m
          ports:
            - containerPort: 6123
              name: rpc
            - containerPort: 6124
              name: blob-server
            - containerPort: 8081
              name: webui
            - containerPort: 9249
              name: metrics
          livenessProbe:
            tcpSocket:
              port: 6123
            initialDelaySeconds: 30
            periodSeconds: 60
          # volumeMounts:
          #   - name: flink-config-volume-rw
          #     mountPath: /opt/flink/conf
          #   - name: job-artifacts-volume
          #     mountPath: /opt/flink/usrlib
          securityContext:
            runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
      # initContainers:
      #   - name: init-jobmanager
      #     image: busybox:1.28
      #     command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
      #     volumeMounts:
      #       - name: flink-config-volume
      #         mountPath: /flink-config/
      #       - name: flink-config-volume-rw
      #         mountPath: /flink-config-rw/
      # volumes:
      #   - name: flink-config-volume
      #     configMap:
      #       name: flink-config
      #       items:
      #         - key: flink-conf.yaml
      #           path: flink-conf.yaml
      #         - key: log4j-console.properties
      #           path: log4j-console.properties
      #   - name: flink-config-volume-rw
      #     emptyDir: {}
      #   - name: job-artifacts-volume
      #     hostPath:
      #       path: /host/path/to/job/artifacts
# NodePort service exposing the JobManager's port 8081 (named "webui" on the
# container) externally on fixed node port 30081.
apiVersion: v1
kind: Service
metadata:
  name: flink-jobmanager-rest
spec:
  type: NodePort
  ports:
    - name: rest
      port: 8081
      targetPort: 8081
      nodePort: 30081
  selector:
    app: flink
    component: jobmanager
\ No newline at end of file
# In-cluster service for the JobManager: RPC (6123), blob server (6124),
# web UI (8081), and Prometheus metrics (9249).
apiVersion: v1
kind: Service
metadata:
  name: flink-jobmanager
  labels:
    app: flink
spec:
  type: ClusterIP
  ports:
    - name: rpc
      port: 6123
    - name: blob-server
      port: 6124
    - name: webui
      port: 8081
    - name: metrics
      port: 9249
  selector:
    app: flink
    component: jobmanager
\ No newline at end of file
# ServiceMonitor instructing the Prometheus operator to scrape the port named
# "metrics" of services labeled app=flink (jobmanager and taskmanager
# services) every 10 seconds.
#
# NOTE: this span previously interleaved the removed Helm-templated cp-kafka
# monitor ({{- if }}/{{- end }}, app: cp-kafka, interval: 7s) with the new
# flink monitor, producing duplicate keys and stray template directives;
# resolved here to the intended plain flink document.
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    app: flink
    appScope: titan-ccp
  name: flink
spec:
  selector:
    matchLabels:
      app: flink
  endpoints:
    - port: metrics
      interval: 10s
# Flink TaskManager workers for the UC1 Beam pipeline. The replica count here
# is the baseline; the uc1-beam-flink Benchmark's ReplicaPatcher targets this
# file to scale the number of instances.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flink-taskmanager
spec:
  replicas: 1
  selector:
    matchLabels:
      app: flink
      component: taskmanager
  template:
    metadata:
      labels:
        app: flink
        component: taskmanager
    spec:
      terminationGracePeriodSeconds: 0
      containers:
        - name: taskmanager
          image: ghcr.io/cau-se/theodolite-uc1-beam-flink:latest
          args: ["taskmanager"]
          env:
            - name: KAFKA_BOOTSTRAP_SERVERS
              value: "theodolite-kafka-kafka-bootstrap:9092"
            - name: SCHEMA_REGISTRY_URL
              value: "http://theodolite-cp-schema-registry:8081"
            - name: COMMIT_INTERVAL_MS
              value: "100"
            - name: CHECKPOINTING
              value: "false"
            # - name: PARALLELISM
            #   value: "1"
            - name: "FLINK_STATE_BACKEND"
              value: "rocksdb"
            # RPC address of the jobmanager service this worker registers with.
            - name: JOB_MANAGER_RPC_ADDRESS
              value: "flink-jobmanager"
            # - name: TASK_MANAGER_NUMBER_OF_TASK_SLOTS
            #   value: "1" #TODO
            # - name: FLINK_PROPERTIES
            #   value: |+
            #     blob.server.port: 6124
            #     jobmanager.rpc.port: 6123
            #     taskmanager.rpc.port: 6122
            #     queryable-state.proxy.ports: 6125
            #     jobmanager.memory.process.size: 4Gb
            #     taskmanager.memory.process.size: 4Gb
            #     #parallelism.default: 1 #TODO
          resources:
            limits:
              memory: 4Gi
              cpu: 1000m
          ports:
            - containerPort: 6122
              name: rpc
            - containerPort: 6125
              name: query-state
            - containerPort: 9249
              name: metrics
          # livenessProbe:
          #   tcpSocket:
          #     port: 6122
          #   initialDelaySeconds: 30
          #   periodSeconds: 60
          # volumeMounts:
          #   - name: flink-config-volume-rw
          #     mountPath: /opt/flink/conf/
          securityContext:
            runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
      # initContainers:
      #   - name: init-taskmanager
      #     image: busybox:1.28
      #     command: ['cp', '-a', '/flink-config/.', '/flink-config-rw/']
      #     volumeMounts:
      #       - name: flink-config-volume
      #         mountPath: /flink-config/
      #       - name: flink-config-volume-rw
      #         mountPath: /flink-config-rw/
      # volumes:
      #   - name: flink-config-volume
      #     configMap:
      #       name: flink-config
      #       items:
      #         - key: flink-conf.yaml
      #           path: flink-conf.yaml
      #         - key: log4j-console.properties
      #           path: log4j-console.properties
      #   - name: flink-config-volume-rw
      #     emptyDir: {}
# In-cluster service exposing only the TaskManager Prometheus metrics port
# (9249); carries the app=flink label matched by the flink ServiceMonitor.
apiVersion: v1
kind: Service
metadata:
  name: flink-taskmanager
  labels:
    app: flink
spec:
  type: ClusterIP
  ports:
    - name: metrics
      port: 9249
  selector:
    app: flink
    component: taskmanager
\ No newline at end of file
# Theodolite Benchmark definition: UC1 implemented with Apache Beam on Flink.
apiVersion: theodolite.com/v1
kind: benchmark
metadata:
  name: uc1-beam-flink
spec:
  # System under test: the Flink cluster manifests from the
  # benchmark-resources-uc1-beam-flink ConfigMap.
  sut:
    resources:
      - configMap:
          name: "benchmark-resources-uc1-beam-flink"
          files:
            - "flink-configuration-configmap.yaml"
            - "taskmanager-deployment.yaml"
            - "taskmanager-service.yaml"
            - "service-monitor.yaml"
            - "jobmanager-service.yaml"
            - "jobmanager-deployment.yaml"
            #- "jobmanager-rest-service.yaml"
  loadGenerator:
    resources:
      - configMap:
          name: "benchmark-resources-uc1-load-generator"
          files:
            - "uc1-load-generator-deployment.yaml"
            - "uc1-load-generator-service.yaml"
  # "Instances" scales TaskManager replicas and keeps the job's requested
  # parallelism (PARALLELISM env var) in sync on both deployments.
  resourceTypes:
    - typeName: "Instances"
      patchers:
        - type: "ReplicaPatcher"
          resource: "taskmanager-deployment.yaml"
        - type: "EnvVarPatcher"
          resource: "jobmanager-deployment.yaml"
          properties:
            container: "jobmanager"
            variableName: "PARALLELISM"
        - type: "EnvVarPatcher" # required?
          resource: "taskmanager-deployment.yaml"
          properties:
            container: "taskmanager"
            variableName: "PARALLELISM"
  loadTypes:
    - typeName: "NumSensors"
      patchers:
        - type: "EnvVarPatcher"
          resource: "uc1-load-generator-deployment.yaml"
          properties:
            container: "workload-generator"
            variableName: "NUM_SENSORS"
        # Presumably scales load-generator replicas once NUM_SENSORS exceeds
        # loadGenMaxRecords per instance — verify against Theodolite docs.
        - type: NumSensorsLoadGeneratorReplicaPatcher
          resource: "uc1-load-generator-deployment.yaml"
          properties:
            loadGenMaxRecords: "150000"
  kafkaConfig:
    bootstrapServer: "theodolite-kafka-kafka-bootstrap:9092"
    topics:
      - name: "input"
        numPartitions: 40
        replicationFactor: 1
\ No newline at end of file
# Service for the titan-ccp-aggregation pods: HTTP on 80 and a metrics port
# on 5556 (no explicit targetPort — Kubernetes defaults it to 5556).
# NOTE(review): this document sits at the edge of the visible diff; confirm
# nothing follows the metrics port entry in the full file.
apiVersion: v1
kind: Service
metadata:
  name: titan-ccp-aggregation
  labels:
    app: titan-ccp-aggregation
spec:
  #type: NodePort
  selector:
    app: titan-ccp-aggregation
  ports:
    - name: http
      port: 80
      targetPort: 80
      protocol: TCP
    - name: metrics
      port: 5556
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment