diff --git a/execution/helm/theodolite-chart/templates/kafka-client.yaml b/execution/helm/theodolite-chart/templates/kafka-client.yaml
index 026f36ed8d8bad990a03e7750e2a37057cf035bc..f0dc094328e7d5629a5d5228fc74e9ae329764c7 100644
--- a/execution/helm/theodolite-chart/templates/kafka-client.yaml
+++ b/execution/helm/theodolite-chart/templates/kafka-client.yaml
@@ -11,4 +11,8 @@ spec:
- sh
- -c
- "exec tail -f /dev/null"
+ {{- with .Values.kafkaClient.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
{{- end }}
\ No newline at end of file
diff --git a/execution/helm/theodolite-chart/templates/zookeeper-client.yaml b/execution/helm/theodolite-chart/templates/zookeeper-client.yaml
index e665682c17b93bff4940dd206267bb3562a0cb7e..995703c127ca9eeec011df98ed6792990cc199de 100644
--- a/execution/helm/theodolite-chart/templates/zookeeper-client.yaml
+++ b/execution/helm/theodolite-chart/templates/zookeeper-client.yaml
@@ -11,4 +11,8 @@ spec:
- sh
- -c
- "exec tail -f /dev/null"
+ {{- with .Values.ZookeeperClient.nodeSelector }}
+ nodeSelector:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
{{- end}}
diff --git a/execution/helm/theodolite-chart/values.yaml b/execution/helm/theodolite-chart/values.yaml
index f1fd5f2f48e4ee6c6db78bffc11b191af624e50b..4be4b96204b72a5d79270f4a26520e72da582d0b 100644
--- a/execution/helm/theodolite-chart/values.yaml
+++ b/execution/helm/theodolite-chart/values.yaml
@@ -1,16 +1,15 @@
-# Theodolite default values
-
###
# Theodolite resources
###
kafkaClient:
enabled: true
- namespace: # TODO
+ nodeSelector: {}
ZookeeperClient:
enabled: true
- namespace: # TODO
+ nodeSelector: {}
+
####
@@ -22,17 +21,14 @@ ZookeeperClient:
###
grafana:
enabled: true
-
+ nodeSelector: {}
image:
repository: grafana/grafana
tag: 6.7.3
pullPolicy: IfNotPresent
-
# Administrator credentials when not using an existing secret (see below)
adminUser: admin
adminPassword: admin
-
-
## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders
## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards
sidecar:
@@ -40,40 +36,11 @@ grafana:
imagePullPolicy: IfNotPresent
dashboards:
enabled: true
- SCProvider: true
- # label that the configmaps with dashboards are marked with
- label: grafana_dashboard
- # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
- folder: /tmp/dashboards
- # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead
- defaultFolderName: null
- # If specified, the sidecar will search for dashboard config-maps inside this namespace.
- # Otherwise the namespace in which the sidecar is running will be used.
- # It's also possible to specify ALL to search in all namespaces
- searchNamespace: null
- # provider configuration that lets grafana manage the dashboards
provider:
- # name of the provider, should be unique
- name: sidecarProvider
- # orgid as configured in grafana
- orgid: 1
- # folder in which the dashboards should be imported in grafana
- folder: ''
- # type of the provider
- type: file
- # disableDelete to activate a import-only behaviour
- disableDelete: false
# allow updating provisioned dashboards from the UI
allowUiUpdates: true
datasources:
enabled: true
- # label that the configmaps with datasources are marked with
- label: grafana_datasource
- # If specified, the sidecar will search for datasource config-maps inside this namespace.
- # Otherwise the namespace in which the sidecar is running will be used.
- # It's also possible to specify ALL to search in all namespaces
- searchNamespace: null
-
service:
nodePort: 31199
type: NodePort
@@ -82,6 +49,7 @@ grafana:
###
# Confluent Platform
###
+
cp-helm-charts:
enabled: true
## ------------------------------------------------------
@@ -89,6 +57,7 @@ cp-helm-charts:
## ------------------------------------------------------
cp-zookeeper:
enabled: true
+ nodeSelector: {}
servers: 3 # default: 3
image: confluentinc/cp-zookeeper
imageTag: 5.4.0
@@ -99,21 +68,13 @@ cp-helm-charts:
heapOptions: "-Xms512M -Xmx512M"
persistence:
enabled: false
- resources: {}
- ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
- ## and remove the curly braces after 'resources:'
- # limits:
- # cpu: 100m
- # memory: 128Mi
- # requests:
- # cpu: 100m
- # memory: 128Mi
## ------------------------------------------------------
## Kafka
## ------------------------------------------------------
cp-kafka:
enabled: true
+ nodeSelector: {}
brokers: 10 # deauflt: 10
image: confluentinc/cp-enterprise-kafka
imageTag: 5.4.0
@@ -125,16 +86,6 @@ cp-helm-charts:
persistence:
enabled: false
resources: {}
- ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
- ## and remove the curly braces after 'resources:'
- # limits:
- # cpu: 100m
- # memory: 128Mi
- # requests:
- # cpu: 100m
- # memory: 128Mi
-
-
configurationOverrides:
#offsets.topic.replication.factor: 1
"message.max.bytes": "134217728" # 128 MB
@@ -160,6 +111,7 @@ cp-helm-charts:
## ------------------------------------------------------
cp-schema-registry:
enabled: true
+ nodeSelector: {}
image: confluentinc/cp-schema-registry
imageTag: 5.4.0
## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
@@ -176,15 +128,6 @@ cp-helm-charts:
nodePort: 30099 # davor wars 1 konflikt mit kafka 31091
annotations: {}
- ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
- ## and remove the curly braces after 'resources:'
- # limits:
- # cpu: 100m
- # memory: 128Mi
- # requests:
- # cpu: 100m
- # memory: 128Mi
-
cp-kafka-rest:
enabled: false
@@ -203,6 +146,7 @@ cp-helm-charts:
###
kafka-lag-exporter:
enabled: true
+ nodeSelector: {}
clusters:
- name: "my-confluent-cp-kafka"
bootstrapBrokers: "my-confluent-cp-kafka:9092"
@@ -262,6 +206,7 @@ kube-prometheus-stack:
enabled: false
prometheusOperator:
+ nodeSelector: {}
enabled: true
prometheus:
@@ -273,6 +218,7 @@ kube-prometheus-stack:
###
prometheus:
enabled: true
+ nodeSelector: {}
# depends on your cluster security and permission settings, you may need to create the following resources
serviceAccount: