Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: she/theodolite

Commits on Source (3)

Showing 433 additions and 65 deletions
apiVersion: v1
kind: ConfigMap
metadata:
  name: flink-config
  labels:
    app: flink
data:
  flink-conf.yaml: |+
    jobmanager.rpc.address: flink-jobmanager
    blob.server.port: 6124
    jobmanager.rpc.port: 6123
    taskmanager.rpc.port: 6122
    queryable-state.proxy.ports: 6125
    jobmanager.memory.process.size: 4Gb
    taskmanager.memory.process.size: 4Gb
    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
    metrics.reporter.prom.interval: 10 SECONDS
    # gives metrics about inbound/outbound network queue lengths
    #taskmanager.network.detailed-metrics: true
  log4j-console.properties: |+
    # This affects logging for both user code and Flink
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = ConsoleAppender
    #rootLogger.appenderRef.rolling.ref = RollingFileAppender
    # Uncomment this if you want to _only_ change Flink's logging
    #logger.flink.name = org.apache.flink
    #logger.flink.level = INFO
    # The following lines keep the log level of common libraries/connectors on
    # log level INFO. The root logger does not override this. You have to manually
    # change the log levels here.
    logger.akka.name = akka
    logger.akka.level = INFO
    logger.kafka.name = org.apache.kafka
    logger.kafka.level = INFO
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = INFO
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = INFO
    # Log all infos to the console
    appender.console.name = ConsoleAppender
    appender.console.type = CONSOLE
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    # Log all infos in the given rolling file
    #appender.rolling.name = RollingFileAppender
    #appender.rolling.type = RollingFile
    #appender.rolling.append = false
    #appender.rolling.fileName = ${sys:log.file}
    #appender.rolling.filePattern = ${sys:log.file}.%i
    #appender.rolling.layout.type = PatternLayout
    #appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    #appender.rolling.policies.type = Policies
    #appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
    #appender.rolling.policies.size.size = 100MB
    #appender.rolling.strategy.type = DefaultRolloverStrategy
    #appender.rolling.strategy.max = 10
    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
    logger.netty.level = OFF
@@ -33,22 +33,11 @@ spec:
             - name: PARALLELISM
               value: "1"
             - name: DISABLE_METRICS
-              value: "true"
+              value: "false"
             - name: FASTER_COPY
               value: "true"
             - name: "FLINK_STATE_BACKEND"
               value: "rocksdb"
-            - name: JOB_MANAGER_RPC_ADDRESS
-              value: "flink-jobmanager"
-            - name: FLINK_PROPERTIES
-              value: |+
-                blob.server.port: 6124
-                jobmanager.rpc.port: 6123
-                taskmanager.rpc.port: 6122
-                queryable-state.proxy.ports: 6125
-                jobmanager.memory.process.size: 4Gb
-                taskmanager.memory.process.size: 4Gb
-                parallelism.default: 1 #TODO
           resources:
             limits:
               memory: 4Gi
@@ -67,5 +56,17 @@ spec:
               port: 6123
             initialDelaySeconds: 30
             periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /opt/flink/conf
           securityContext:
-            runAsUser: 9999
+            runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
@@ -19,9 +19,6 @@ spec:
         - name: taskmanager
           image: ghcr.io/cau-se/theodolite-uc1-beam-flink:latest
           args: ["taskmanager"]
-          env:
-            - name: JOB_MANAGER_RPC_ADDRESS
-              value: "flink-jobmanager"
           resources:
             limits:
               memory: 4Gi
@@ -33,5 +30,22 @@ spec:
               name: query-state
             - containerPort: 9249
               name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6122
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /opt/flink/conf/
           securityContext:
-            runAsUser: 9999
+            runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
apiVersion: v1
kind: ConfigMap
metadata:
  name: flink-config
  labels:
    app: flink
data:
  flink-conf.yaml: |+
    jobmanager.rpc.address: flink-jobmanager
    blob.server.port: 6124
    jobmanager.rpc.port: 6123
    taskmanager.rpc.port: 6122
    queryable-state.proxy.ports: 6125
    jobmanager.memory.process.size: 4Gb
    taskmanager.memory.process.size: 4Gb
    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
    metrics.reporter.prom.interval: 10 SECONDS
    # gives metrics about inbound/outbound network queue lengths
    #taskmanager.network.detailed-metrics: true
  log4j-console.properties: |+
    # This affects logging for both user code and Flink
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = ConsoleAppender
    #rootLogger.appenderRef.rolling.ref = RollingFileAppender
    # Uncomment this if you want to _only_ change Flink's logging
    #logger.flink.name = org.apache.flink
    #logger.flink.level = INFO
    # The following lines keep the log level of common libraries/connectors on
    # log level INFO. The root logger does not override this. You have to manually
    # change the log levels here.
    logger.akka.name = akka
    logger.akka.level = INFO
    logger.kafka.name = org.apache.kafka
    logger.kafka.level = INFO
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = INFO
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = INFO
    # Log all infos to the console
    appender.console.name = ConsoleAppender
    appender.console.type = CONSOLE
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    # Log all infos in the given rolling file
    #appender.rolling.name = RollingFileAppender
    #appender.rolling.type = RollingFile
    #appender.rolling.append = false
    #appender.rolling.fileName = ${sys:log.file}
    #appender.rolling.filePattern = ${sys:log.file}.%i
    #appender.rolling.layout.type = PatternLayout
    #appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    #appender.rolling.policies.type = Policies
    #appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
    #appender.rolling.policies.size.size = 100MB
    #appender.rolling.strategy.type = DefaultRolloverStrategy
    #appender.rolling.strategy.max = 10
    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
    logger.netty.level = OFF
@@ -33,22 +33,11 @@ spec:
             - name: PARALLELISM
               value: "1"
             - name: DISABLE_METRICS
-              value: "true"
+              value: "false"
             - name: FASTER_COPY
               value: "true"
             - name: "FLINK_STATE_BACKEND"
               value: "rocksdb"
-            - name: JOB_MANAGER_RPC_ADDRESS
-              value: "flink-jobmanager"
-            - name: FLINK_PROPERTIES
-              value: |+
-                blob.server.port: 6124
-                jobmanager.rpc.port: 6123
-                taskmanager.rpc.port: 6122
-                queryable-state.proxy.ports: 6125
-                jobmanager.memory.process.size: 4Gb
-                taskmanager.memory.process.size: 4Gb
-                parallelism.default: 1 #TODO
           resources:
             limits:
               memory: 4Gi
@@ -67,5 +56,17 @@ spec:
               port: 6123
             initialDelaySeconds: 30
             periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /opt/flink/conf
           securityContext:
-            runAsUser: 9999
+            runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
@@ -19,9 +19,6 @@ spec:
         - name: taskmanager
           image: ghcr.io/cau-se/theodolite-uc2-beam-flink:latest
           args: ["taskmanager"]
-          env:
-            - name: JOB_MANAGER_RPC_ADDRESS
-              value: "flink-jobmanager"
           resources:
             limits:
               memory: 4Gi
@@ -33,5 +30,22 @@ spec:
               name: query-state
             - containerPort: 9249
               name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6122
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /opt/flink/conf/
           securityContext:
-            runAsUser: 9999
+            runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
apiVersion: v1
kind: ConfigMap
metadata:
  name: flink-config
  labels:
    app: flink
data:
  flink-conf.yaml: |+
    jobmanager.rpc.address: flink-jobmanager
    blob.server.port: 6124
    jobmanager.rpc.port: 6123
    taskmanager.rpc.port: 6122
    queryable-state.proxy.ports: 6125
    jobmanager.memory.process.size: 4Gb
    taskmanager.memory.process.size: 4Gb
    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
    metrics.reporter.prom.interval: 10 SECONDS
    # gives metrics about inbound/outbound network queue lengths
    #taskmanager.network.detailed-metrics: true
  log4j-console.properties: |+
    # This affects logging for both user code and Flink
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = ConsoleAppender
    #rootLogger.appenderRef.rolling.ref = RollingFileAppender
    # Uncomment this if you want to _only_ change Flink's logging
    #logger.flink.name = org.apache.flink
    #logger.flink.level = INFO
    # The following lines keep the log level of common libraries/connectors on
    # log level INFO. The root logger does not override this. You have to manually
    # change the log levels here.
    logger.akka.name = akka
    logger.akka.level = INFO
    logger.kafka.name = org.apache.kafka
    logger.kafka.level = INFO
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = INFO
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = INFO
    # Log all infos to the console
    appender.console.name = ConsoleAppender
    appender.console.type = CONSOLE
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    # Log all infos in the given rolling file
    #appender.rolling.name = RollingFileAppender
    #appender.rolling.type = RollingFile
    #appender.rolling.append = false
    #appender.rolling.fileName = ${sys:log.file}
    #appender.rolling.filePattern = ${sys:log.file}.%i
    #appender.rolling.layout.type = PatternLayout
    #appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    #appender.rolling.policies.type = Policies
    #appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
    #appender.rolling.policies.size.size = 100MB
    #appender.rolling.strategy.type = DefaultRolloverStrategy
    #appender.rolling.strategy.max = 10
    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
    logger.netty.level = OFF
@@ -33,22 +33,11 @@ spec:
             - name: PARALLELISM
               value: "1"
             - name: DISABLE_METRICS
-              value: "true"
+              value: "false"
             - name: FASTER_COPY
               value: "true"
             - name: "FLINK_STATE_BACKEND"
               value: "rocksdb"
-            - name: JOB_MANAGER_RPC_ADDRESS
-              value: "flink-jobmanager"
-            - name: FLINK_PROPERTIES
-              value: |+
-                blob.server.port: 6124
-                jobmanager.rpc.port: 6123
-                taskmanager.rpc.port: 6122
-                queryable-state.proxy.ports: 6125
-                jobmanager.memory.process.size: 4Gb
-                taskmanager.memory.process.size: 4Gb
-                parallelism.default: 1 #TODO
           resources:
             limits:
               memory: 4Gi
@@ -67,5 +56,17 @@ spec:
               port: 6123
             initialDelaySeconds: 30
             periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /opt/flink/conf
           securityContext:
-            runAsUser: 9999
+            runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
@@ -19,9 +19,6 @@ spec:
         - name: taskmanager
           image: ghcr.io/cau-se/theodolite-uc3-beam-flink:latest
           args: ["taskmanager"]
-          env:
-            - name: JOB_MANAGER_RPC_ADDRESS
-              value: "flink-jobmanager"
           resources:
             limits:
               memory: 4Gi
@@ -33,5 +30,22 @@ spec:
               name: query-state
             - containerPort: 9249
               name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6122
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /opt/flink/conf/
           securityContext:
-            runAsUser: 9999
+            runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
apiVersion: v1
kind: ConfigMap
metadata:
  name: flink-config
  labels:
    app: flink
data:
  flink-conf.yaml: |+
    jobmanager.rpc.address: flink-jobmanager
    blob.server.port: 6124
    jobmanager.rpc.port: 6123
    taskmanager.rpc.port: 6122
    queryable-state.proxy.ports: 6125
    jobmanager.memory.process.size: 4Gb
    taskmanager.memory.process.size: 4Gb
    metrics.reporter.prom.class: org.apache.flink.metrics.prometheus.PrometheusReporter
    metrics.reporter.prom.interval: 10 SECONDS
    # gives metrics about inbound/outbound network queue lengths
    #taskmanager.network.detailed-metrics: true
  log4j-console.properties: |+
    # This affects logging for both user code and Flink
    rootLogger.level = INFO
    rootLogger.appenderRef.console.ref = ConsoleAppender
    #rootLogger.appenderRef.rolling.ref = RollingFileAppender
    # Uncomment this if you want to _only_ change Flink's logging
    #logger.flink.name = org.apache.flink
    #logger.flink.level = INFO
    # The following lines keep the log level of common libraries/connectors on
    # log level INFO. The root logger does not override this. You have to manually
    # change the log levels here.
    logger.akka.name = akka
    logger.akka.level = INFO
    logger.kafka.name = org.apache.kafka
    logger.kafka.level = INFO
    logger.hadoop.name = org.apache.hadoop
    logger.hadoop.level = INFO
    logger.zookeeper.name = org.apache.zookeeper
    logger.zookeeper.level = INFO
    # Log all infos to the console
    appender.console.name = ConsoleAppender
    appender.console.type = CONSOLE
    appender.console.layout.type = PatternLayout
    appender.console.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    # Log all infos in the given rolling file
    #appender.rolling.name = RollingFileAppender
    #appender.rolling.type = RollingFile
    #appender.rolling.append = false
    #appender.rolling.fileName = ${sys:log.file}
    #appender.rolling.filePattern = ${sys:log.file}.%i
    #appender.rolling.layout.type = PatternLayout
    #appender.rolling.layout.pattern = %d{yyyy-MM-dd HH:mm:ss,SSS} %-5p %-60c %x - %m%n
    #appender.rolling.policies.type = Policies
    #appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
    #appender.rolling.policies.size.size = 100MB
    #appender.rolling.strategy.type = DefaultRolloverStrategy
    #appender.rolling.strategy.max = 10
    # Suppress the irrelevant (wrong) warnings from the Netty channel handler
    logger.netty.name = org.apache.flink.shaded.akka.org.jboss.netty.channel.DefaultChannelPipeline
    logger.netty.level = OFF
@@ -33,22 +33,11 @@ spec:
             - name: PARALLELISM
               value: "1"
             - name: DISABLE_METRICS
-              value: "true"
+              value: "false"
             - name: FASTER_COPY
               value: "true"
             - name: "FLINK_STATE_BACKEND"
               value: "rocksdb"
-            - name: JOB_MANAGER_RPC_ADDRESS
-              value: "flink-jobmanager"
-            - name: FLINK_PROPERTIES
-              value: |+
-                blob.server.port: 6124
-                jobmanager.rpc.port: 6123
-                taskmanager.rpc.port: 6122
-                queryable-state.proxy.ports: 6125
-                jobmanager.memory.process.size: 4Gb
-                taskmanager.memory.process.size: 4Gb
-                parallelism.default: 1 #TODO
           resources:
             limits:
               memory: 4Gi
@@ -67,5 +56,17 @@ spec:
               port: 6123
             initialDelaySeconds: 30
             periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /opt/flink/conf
           securityContext:
-            runAsUser: 9999
+            runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
@@ -19,9 +19,6 @@ spec:
         - name: taskmanager
           image: ghcr.io/cau-se/theodolite-uc4-beam-flink:latest
           args: ["taskmanager"]
-          env:
-            - name: JOB_MANAGER_RPC_ADDRESS
-              value: "flink-jobmanager"
           resources:
             limits:
               memory: 4Gi
@@ -33,5 +30,22 @@ spec:
               name: query-state
             - containerPort: 9249
               name: metrics
+          livenessProbe:
+            tcpSocket:
+              port: 6122
+            initialDelaySeconds: 30
+            periodSeconds: 60
+          volumeMounts:
+            - name: flink-config-volume
+              mountPath: /opt/flink/conf/
           securityContext:
-            runAsUser: 9999
+            runAsUser: 9999 # refers to user _flink_ from official flink image, change if necessary
+      volumes:
+        - name: flink-config-volume
+          configMap:
+            name: flink-config
+            items:
+              - key: flink-conf.yaml
+                path: flink-conf.yaml
+              - key: log4j-console.properties
+                path: log4j-console.properties
@@ -27,8 +27,8 @@ class ConfigMapYamlPatcher(
         val parser = Yaml(dumperOptions)

         // Change value
-        val yaml = parser.loadAs(yamlFile, LinkedHashMap<String, String>()::class.java)
-        yaml[variableName] = value
+        val yaml = parser.loadAs(yamlFile, LinkedHashMap<String, Any>()::class.java)
+        yaml[variableName] = value.toLongOrNull() ?: value.toDoubleOrNull() ?: value.toBooleanStrictOrNull() ?: value

         // Convert back to String and set in Kubernetes resource
         resource.data[fileName] = parser.dump(yaml)
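The two replaced lines are the heart of this change: the patcher previously loaded the file as a map of strings, so every patched value was dumped back as a quoted string, and numeric or boolean Flink settings lost their type. The new version loads values as Any and coerces the incoming string before writing it back. A minimal, self-contained sketch of that coercion chain, using SnakeYAML directly (the class and method names match the diff; the surrounding patcher plumbing is elided):

    import org.yaml.snakeyaml.DumperOptions
    import org.yaml.snakeyaml.Yaml

    fun main() {
        val dumperOptions = DumperOptions()
        dumperOptions.defaultFlowStyle = DumperOptions.FlowStyle.BLOCK
        val parser = Yaml(dumperOptions)

        // Load as Map<String, Any> so non-string values survive the round trip.
        val yaml = parser.loadAs("first: some-test\nsecond: 1", LinkedHashMap<String, Any>()::class.java)

        // The coercion chain from the patch: try Long, then Double, then strict
        // Boolean, and fall back to the raw string if nothing matches.
        val value = "42"
        yaml["second"] = value.toLongOrNull() ?: value.toDoubleOrNull() ?: value.toBooleanStrictOrNull() ?: value

        // The dump now contains `second: 42` (a YAML integer); with the old
        // Map<String, String> loading it would have read `second: '42'`.
        println(parser.dump(yaml))
    }

The strict boolean parse matters here: toBooleanStrictOrNull accepts only the exact strings "true" and "false", so inputs like "TRUE" or "yes" stay strings instead of being silently coerced.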
package rocks.theodolite.kubernetes.patcher

import io.fabric8.kubernetes.api.model.ConfigMap
import io.fabric8.kubernetes.api.model.ConfigMapBuilder
import org.junit.jupiter.api.AfterEach
import org.junit.jupiter.api.Assertions.*
import org.junit.jupiter.api.BeforeEach
import org.junit.jupiter.api.Test
import org.junit.jupiter.params.ParameterizedTest
import org.junit.jupiter.params.provider.ValueSource

internal class ConfigMapYamlPatcherTest {

    private lateinit var configMap: ConfigMap
    private val patcher = ConfigMapYamlPatcher("some-file.yaml", "second")

    @BeforeEach
    fun setUp() {
        val data = mapOf(
            "some-file.yaml" to """
                first: some-test
                second: 1
                third: 1234
            """.trimIndent()
        )
        this.configMap = ConfigMapBuilder()
            .withNewMetadata()
            .withName("example")
            .endMetadata()
            .addToData(data)
            .build()
    }

    @ParameterizedTest
    @ValueSource(strings = ["some-string", "42", "42.42", "true"])
    fun setSettingString(inputValue: String) {
        val patched = patcher.patchSingleResource(this.configMap, inputValue)
        assertTrue(patched is ConfigMap)
        patched as ConfigMap
        val yaml = patched.data["some-file.yaml"]
        assertTrue(yaml != null)
        val line = yaml!!.lines().getOrNull(1)
        assertTrue(line != null)
        val value = line!!.split(": ").getOrNull(1)
        assertEquals(inputValue, value)
    }
}
\ No newline at end of file
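For context, a hypothetical invocation tying this patcher to the flink-config ConfigMap from the manifests above; the variable flinkConfigMap and the patched setting are illustrative, but the API is exactly the one exercised by the test:

    // Illustrative only: flinkConfigMap is assumed to hold the flink-config
    // manifest defined earlier in this changeset.
    val parallelismPatcher = ConfigMapYamlPatcher("flink-conf.yaml", "parallelism.default")
    val patched = parallelismPatcher.patchSingleResource(flinkConfigMap, "4") as ConfigMap
    // With the type coercion in place, the patched file contains
    // `parallelism.default: 4` (an integer) rather than `parallelism.default: '4'`.
    println(patched.data["flink-conf.yaml"])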