Commit d98557b0 authored by Sören Henning

Use ConfigMap configuration for Beam/Flink

parent 2520dec9
Pipeline #10343 passed
Showing 375 additions and 59 deletions
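In short, this commit drops the JOB_MANAGER_RPC_ADDRESS and FLINK_PROPERTIES environment variables and instead ships flink-conf.yaml and log4j-console.properties in a flink-config ConfigMap that is mounted into /opt/flink/conf of the jobmanager and taskmanager containers. The sketch below is illustrative only, abridged from the manifests in this commit; the Deployment skeleton around the volume wiring is assumed, not part of the diff.

# Illustrative sketch only -- abridged from the uc1-uc4 manifests in this commit.
apiVersion: apps/v1
kind: Deployment
spec:
  template:
    spec:
      containers:
      - name: jobmanager              # the taskmanager is wired the same way
        volumeMounts:
        - name: flink-config-volume
          mountPath: /opt/flink/conf  # default config directory of the official Flink image
      volumes:
      - name: flink-config-volume
        configMap:
          name: flink-config
          items:
          - key: flink-conf.yaml
            path: flink-conf.yaml
          - key: log4j-console.properties
            path: log4j-console.properties

Changing a Flink setting (for example the Prometheus reporter interval or the memory sizes) now means editing the ConfigMap once instead of every container's env block.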
apiVersion: v1
kind: ConfigMap
metadata:
  name: flink-config
  labels:
    app: flink
data:
  flink-conf.yaml: |+
    jobmanager.rpc.address: flink-jobmanager
    blob.server.port: 6124
    jobmanager.rpc.port: 6123
    taskmanager.rpc.port: 6122
    queryable-state.proxy.ports: 6125
    jobmanager.memory.process.size: 4Gb
    taskmanager.memory.process.size: 4Gb
    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
    metrics.reporter.prom.interval: 10 SECONDS
    # gives metrics about inbound/outbound network queue lengths
    #taskmanager.network.detailed-metrics: true
  log4j-console.properties: |+
    # This affects logging for both user code and Flink
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = ConsoleAppender
    #rootLogger.appenderRef.rolling.ref = RollingFileAppender
    # Uncomment this if you want to _only_ change Flink's logging
    #logger.flink.name = org.apache.flink
    #logger.flink.level = INFO
    # The following lines keep the log level of common libraries/connectors on
    # log level INFO. The root logger does not override this. You have to manually
    # change the log levels here.
    logger.akka.name = akka
    logger.akka.level = INFO
    logger.kafka.name = org.apache.kafka
    logger.kafka.level = INFO
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = INFO
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = INFO
    # Log all infos to the console
    appender.console.name = ConsoleAppender
    appender.console.type = CONSOLE
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    # Log all infos in the given rolling file
    #appender.rolling.name = RollingFileAppender
    #appender.rolling.type = RollingFile
    #appender.rolling.append = false
    #appender.rolling.fileName = ${sys:log.file}
    #appender.rolling.filePattern = ${sys:log.file}.%i
    #appender.rolling.layout.type = PatternLayout
    #appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    #appender.rolling.policies.type = Policies
    #appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
    #appender.rolling.policies.size.size = 100MB
    #appender.rolling.strategy.type = DefaultRolloverStrategy
    #appender.rolling.strategy.max = 10
    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
    logger.netty.level = OFF
@@ -38,17 +38,6 @@ spec:
           value: "true"
         - name: "FLINK_STATE_BACKEND"
           value: "rocksdb"
-        - name: JOB_MANAGER_RPC_ADDRESS
-          value: "flink-jobmanager"
-        - name: FLINK_PROPERTIES
-          value: |+
-            blob.server.port: 6124
-            jobmanager.rpc.port: 6123
-            taskmanager.rpc.port: 6122
-            queryable-state.proxy.ports: 6125
-            jobmanager.memory.process.size: 4Gb
-            taskmanager.memory.process.size: 4Gb
-            parallelism.default: 1 #TODO
         resources:
           limits:
             memory: 4Gi
@@ -67,5 +56,17 @@ spec:
             port: 6123
           initialDelaySeconds: 30
           periodSeconds: 60
+        volumeMounts:
+        - name: flink-config-volume
+          mountPath: /opt/flink/conf
       securityContext:
         runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+      - name: flink-config-volume
+        configMap:
+          name: flink-config
+          items:
+          - key: flink-conf.yaml
+            path: flink-conf.yaml
+          - key: log4j-console.properties
+            path: log4j-console.properties
@@ -19,9 +19,6 @@ spec:
       - name: taskmanager
         image: ghcr.io/cau-se/theodolite-uc1-beam-flink:latest
         args: ["taskmanager"]
-        env:
-        - name: JOB_MANAGER_RPC_ADDRESS
-          value: "flink-jobmanager"
         resources:
           limits:
             memory: 4Gi
@@ -33,5 +30,22 @@ spec:
           name: query-state
         - containerPort: 9249
           name: metrics
+        livenessProbe:
+          tcpSocket:
+            port: 6122
+          initialDelaySeconds: 30
+          periodSeconds: 60
+        volumeMounts:
+        - name: flink-config-volume
+          mountPath: /opt/flink/conf/
       securityContext:
         runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+      - name: flink-config-volume
+        configMap:
+          name: flink-config
+          items:
+          - key: flink-conf.yaml
+            path: flink-conf.yaml
+          - key: log4j-console.properties
+            path: log4j-console.properties
apiVersion: v1
kind: ConfigMap
metadata:
  name: flink-config
  labels:
    app: flink
data:
  flink-conf.yaml: |+
    jobmanager.rpc.address: flink-jobmanager
    blob.server.port: 6124
    jobmanager.rpc.port: 6123
    taskmanager.rpc.port: 6122
    queryable-state.proxy.ports: 6125
    jobmanager.memory.process.size: 4Gb
    taskmanager.memory.process.size: 4Gb
    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
    metrics.reporter.prom.interval: 10 SECONDS
    # gives metrics about inbound/outbound network queue lengths
    #taskmanager.network.detailed-metrics: true
  log4j-console.properties: |+
    # This affects logging for both user code and Flink
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = ConsoleAppender
    #rootLogger.appenderRef.rolling.ref = RollingFileAppender
    # Uncomment this if you want to _only_ change Flink's logging
    #logger.flink.name = org.apache.flink
    #logger.flink.level = INFO
    # The following lines keep the log level of common libraries/connectors on
    # log level INFO. The root logger does not override this. You have to manually
    # change the log levels here.
    logger.akka.name = akka
    logger.akka.level = INFO
    logger.kafka.name = org.apache.kafka
    logger.kafka.level = INFO
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = INFO
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = INFO
    # Log all infos to the console
    appender.console.name = ConsoleAppender
    appender.console.type = CONSOLE
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    # Log all infos in the given rolling file
    #appender.rolling.name = RollingFileAppender
    #appender.rolling.type = RollingFile
    #appender.rolling.append = false
    #appender.rolling.fileName = ${sys:log.file}
    #appender.rolling.filePattern = ${sys:log.file}.%i
    #appender.rolling.layout.type = PatternLayout
    #appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    #appender.rolling.policies.type = Policies
    #appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
    #appender.rolling.policies.size.size = 100MB
    #appender.rolling.strategy.type = DefaultRolloverStrategy
    #appender.rolling.strategy.max = 10
    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
    logger.netty.level = OFF
@@ -38,17 +38,6 @@ spec:
           value: "true"
         - name: "FLINK_STATE_BACKEND"
           value: "rocksdb"
-        - name: JOB_MANAGER_RPC_ADDRESS
-          value: "flink-jobmanager"
-        - name: FLINK_PROPERTIES
-          value: |+
-            blob.server.port: 6124
-            jobmanager.rpc.port: 6123
-            taskmanager.rpc.port: 6122
-            queryable-state.proxy.ports: 6125
-            jobmanager.memory.process.size: 4Gb
-            taskmanager.memory.process.size: 4Gb
-            parallelism.default: 1 #TODO
         resources:
           limits:
             memory: 4Gi
@@ -67,5 +56,17 @@ spec:
             port: 6123
           initialDelaySeconds: 30
           periodSeconds: 60
+        volumeMounts:
+        - name: flink-config-volume
+          mountPath: /opt/flink/conf
       securityContext:
         runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+      - name: flink-config-volume
+        configMap:
+          name: flink-config
+          items:
+          - key: flink-conf.yaml
+            path: flink-conf.yaml
+          - key: log4j-console.properties
+            path: log4j-console.properties
@@ -19,9 +19,6 @@ spec:
       - name: taskmanager
         image: ghcr.io/cau-se/theodolite-uc2-beam-flink:latest
         args: ["taskmanager"]
-        env:
-        - name: JOB_MANAGER_RPC_ADDRESS
-          value: "flink-jobmanager"
         resources:
           limits:
             memory: 4Gi
@@ -33,5 +30,22 @@ spec:
           name: query-state
         - containerPort: 9249
           name: metrics
+        livenessProbe:
+          tcpSocket:
+            port: 6122
+          initialDelaySeconds: 30
+          periodSeconds: 60
+        volumeMounts:
+        - name: flink-config-volume
+          mountPath: /opt/flink/conf/
       securityContext:
         runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+      - name: flink-config-volume
+        configMap:
+          name: flink-config
+          items:
+          - key: flink-conf.yaml
+            path: flink-conf.yaml
+          - key: log4j-console.properties
+            path: log4j-console.properties
apiVersion: v1
kind: ConfigMap
metadata:
  name: flink-config
  labels:
    app: flink
data:
  flink-conf.yaml: |+
    jobmanager.rpc.address: flink-jobmanager
    blob.server.port: 6124
    jobmanager.rpc.port: 6123
    taskmanager.rpc.port: 6122
    queryable-state.proxy.ports: 6125
    jobmanager.memory.process.size: 4Gb
    taskmanager.memory.process.size: 4Gb
    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
    metrics.reporter.prom.interval: 10 SECONDS
    # gives metrics about inbound/outbound network queue lengths
    #taskmanager.network.detailed-metrics: true
  log4j-console.properties: |+
    # This affects logging for both user code and Flink
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = ConsoleAppender
    #rootLogger.appenderRef.rolling.ref = RollingFileAppender
    # Uncomment this if you want to _only_ change Flink's logging
    #logger.flink.name = org.apache.flink
    #logger.flink.level = INFO
    # The following lines keep the log level of common libraries/connectors on
    # log level INFO. The root logger does not override this. You have to manually
    # change the log levels here.
    logger.akka.name = akka
    logger.akka.level = INFO
    logger.kafka.name = org.apache.kafka
    logger.kafka.level = INFO
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = INFO
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = INFO
    # Log all infos to the console
    appender.console.name = ConsoleAppender
    appender.console.type = CONSOLE
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    # Log all infos in the given rolling file
    #appender.rolling.name = RollingFileAppender
    #appender.rolling.type = RollingFile
    #appender.rolling.append = false
    #appender.rolling.fileName = ${sys:log.file}
    #appender.rolling.filePattern = ${sys:log.file}.%i
    #appender.rolling.layout.type = PatternLayout
    #appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    #appender.rolling.policies.type = Policies
    #appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
    #appender.rolling.policies.size.size = 100MB
    #appender.rolling.strategy.type = DefaultRolloverStrategy
    #appender.rolling.strategy.max = 10
    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
    logger.netty.level = OFF
@@ -38,17 +38,6 @@ spec:
           value: "true"
         - name: "FLINK_STATE_BACKEND"
           value: "rocksdb"
-        - name: JOB_MANAGER_RPC_ADDRESS
-          value: "flink-jobmanager"
-        - name: FLINK_PROPERTIES
-          value: |+
-            blob.server.port: 6124
-            jobmanager.rpc.port: 6123
-            taskmanager.rpc.port: 6122
-            queryable-state.proxy.ports: 6125
-            jobmanager.memory.process.size: 4Gb
-            taskmanager.memory.process.size: 4Gb
-            parallelism.default: 1 #TODO
         resources:
           limits:
             memory: 4Gi
@@ -67,5 +56,17 @@ spec:
             port: 6123
           initialDelaySeconds: 30
           periodSeconds: 60
+        volumeMounts:
+        - name: flink-config-volume
+          mountPath: /opt/flink/conf
       securityContext:
         runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+      - name: flink-config-volume
+        configMap:
+          name: flink-config
+          items:
+          - key: flink-conf.yaml
+            path: flink-conf.yaml
+          - key: log4j-console.properties
+            path: log4j-console.properties
@@ -19,9 +19,6 @@ spec:
       - name: taskmanager
         image: ghcr.io/cau-se/theodolite-uc3-beam-flink:latest
         args: ["taskmanager"]
-        env:
-        - name: JOB_MANAGER_RPC_ADDRESS
-          value: "flink-jobmanager"
         resources:
           limits:
             memory: 4Gi
@@ -33,5 +30,22 @@ spec:
           name: query-state
         - containerPort: 9249
           name: metrics
+        livenessProbe:
+          tcpSocket:
+            port: 6122
+          initialDelaySeconds: 30
+          periodSeconds: 60
+        volumeMounts:
+        - name: flink-config-volume
+          mountPath: /opt/flink/conf/
       securityContext:
         runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+      - name: flink-config-volume
+        configMap:
+          name: flink-config
+          items:
+          - key: flink-conf.yaml
+            path: flink-conf.yaml
+          - key: log4j-console.properties
+            path: log4j-console.properties
apiVersion: v1
kind: ConfigMap
metadata:
  name: flink-config
  labels:
    app: flink
data:
  flink-conf.yaml: |+
    jobmanager.rpc.address: flink-jobmanager
    blob.server.port: 6124
    jobmanager.rpc.port: 6123
    taskmanager.rpc.port: 6122
    queryable-state.proxy.ports: 6125
    jobmanager.memory.process.size: 4Gb
    taskmanager.memory.process.size: 4Gb
    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
    metrics.reporter.prom.interval: 10 SECONDS
    # gives metrics about inbound/outbound network queue lengths
    #taskmanager.network.detailed-metrics: true
  log4j-console.properties: |+
    # This affects logging for both user code and Flink
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = ConsoleAppender
    #rootLogger.appenderRef.rolling.ref = RollingFileAppender
    # Uncomment this if you want to _only_ change Flink's logging
    #logger.flink.name = org.apache.flink
    #logger.flink.level = INFO
    # The following lines keep the log level of common libraries/connectors on
    # log level INFO. The root logger does not override this. You have to manually
    # change the log levels here.
    logger.akka.name = akka
    logger.akka.level = INFO
    logger.kafka.name = org.apache.kafka
    logger.kafka.level = INFO
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = INFO
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = INFO
    # Log all infos to the console
    appender.console.name = ConsoleAppender
    appender.console.type = CONSOLE
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    # Log all infos in the given rolling file
    #appender.rolling.name = RollingFileAppender
    #appender.rolling.type = RollingFile
    #appender.rolling.append = false
    #appender.rolling.fileName = ${sys:log.file}
    #appender.rolling.filePattern = ${sys:log.file}.%i
    #appender.rolling.layout.type = PatternLayout
    #appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    #appender.rolling.policies.type = Policies
    #appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
    #appender.rolling.policies.size.size = 100MB
    #appender.rolling.strategy.type = DefaultRolloverStrategy
    #appender.rolling.strategy.max = 10
    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
    logger.netty.level = OFF
@@ -38,17 +38,6 @@ spec:
           value: "true"
         - name: "FLINK_STATE_BACKEND"
           value: "rocksdb"
-        - name: JOB_MANAGER_RPC_ADDRESS
-          value: "flink-jobmanager"
-        - name: FLINK_PROPERTIES
-          value: |+
-            blob.server.port: 6124
-            jobmanager.rpc.port: 6123
-            taskmanager.rpc.port: 6122
-            queryable-state.proxy.ports: 6125
-            jobmanager.memory.process.size: 4Gb
-            taskmanager.memory.process.size: 4Gb
-            parallelism.default: 1 #TODO
         resources:
           limits:
             memory: 4Gi
@@ -67,5 +56,17 @@ spec:
             port: 6123
           initialDelaySeconds: 30
           periodSeconds: 60
+        volumeMounts:
+        - name: flink-config-volume
+          mountPath: /opt/flink/conf
       securityContext:
         runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+      - name: flink-config-volume
+        configMap:
+          name: flink-config
+          items:
+          - key: flink-conf.yaml
+            path: flink-conf.yaml
+          - key: log4j-console.properties
+            path: log4j-console.properties
@@ -19,9 +19,6 @@ spec:
       - name: taskmanager
         image: ghcr.io/cau-se/theodolite-uc4-beam-flink:latest
         args: ["taskmanager"]
-        env:
-        - name: JOB_MANAGER_RPC_ADDRESS
-          value: "flink-jobmanager"
         resources:
           limits:
             memory: 4Gi
@@ -33,5 +30,22 @@ spec:
           name: query-state
         - containerPort: 9249
          name: metrics
+        livenessProbe:
+          tcpSocket:
+            port: 6122
+          initialDelaySeconds: 30
+          periodSeconds: 60
+        volumeMounts:
+        - name: flink-config-volume
+          mountPath: /opt/flink/conf/
       securityContext:
         runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+      - name: flink-config-volume
+        configMap:
+          name: flink-config
+          items:
+          - key: flink-conf.yaml
+            path: flink-conf.yaml
+          - key: log4j-console.properties
+            path: log4j-console.properties