# Theodolite default values

###
# Theodolite resources
###
kafkaClient:
  enabled: true
  namespace: # TODO

###
## Configuration of sub charts
###

###
# Grafana
###
grafana:
  enabled: true
  image:
    repository: grafana/grafana
    tag: 6.7.3
    pullPolicy: IfNotPresent
  # Administrator credentials when not using an existing secret (see below)
  adminUser: admin
  adminPassword: admin
  ## Sidecars that collect the ConfigMaps with the specified label and store the included files into the respective folders
  ## Requires at least Grafana 5 and can't be used together with the parameters dashboardProviders, datasources and dashboards
  sidecar:
    image: kiwigrid/k8s-sidecar:0.1.99
    imagePullPolicy: IfNotPresent
    dashboards:
      enabled: true
      SCProvider: true
      # label that the ConfigMaps with dashboards are marked with
      label: grafana_dashboard
      # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
      folder: /tmp/dashboards
      # The default folder name; it will create a subfolder under `folder` and put dashboards there instead
      defaultFolderName: null
      # If specified, the sidecar will search for dashboard ConfigMaps inside this namespace.
      # Otherwise the namespace in which the sidecar is running will be used.
      # It's also possible to specify ALL to search in all namespaces.
      searchNamespace: null
      # provider configuration that lets Grafana manage the dashboards
      provider:
        # name of the provider, should be unique
        name: sidecarProvider
        # orgid as configured in Grafana
        orgid: 1
        # folder in which the dashboards should be imported in Grafana
        folder: ''
        # type of the provider
        type: file
        # disableDelete to activate an import-only behaviour
        disableDelete: false
        # allow updating provisioned dashboards from the UI
        allowUiUpdates: true
    datasources:
      enabled: true
      # label that the ConfigMaps with datasources are marked with
      label: grafana_datasource
      # If specified, the sidecar will search for datasource ConfigMaps inside this namespace.
      # Otherwise the namespace in which the sidecar is running will be used.
      # It's also possible to specify ALL to search in all namespaces.
      searchNamespace: null
  service:
    nodePort: 31199
    type: NodePort

###
# Confluent Platform
###
cp-helm-charts:
  enabled: true

  ## ------------------------------------------------------
  ## Zookeeper
  ## ------------------------------------------------------
  cp-zookeeper:
    enabled: true
    servers: 3 # default: 3
    image: confluentinc/cp-zookeeper
    imageTag: 5.4.0
    ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
    ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
    imagePullSecrets:
    #  - name: "regcred"
    heapOptions: "-Xms512M -Xmx512M"
    persistence:
      enabled: false
    resources: {}
    ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
    ## and remove the curly braces after 'resources:'
    #  limits:
    #    cpu: 100m
    #    memory: 128Mi
    #  requests:
    #    cpu: 100m
    #    memory: 128Mi

  ## ------------------------------------------------------
  ## Kafka
  ## ------------------------------------------------------
  cp-kafka:
    enabled: true
    brokers: 10 # default: 10
    image: confluentinc/cp-enterprise-kafka
    imageTag: 5.4.0
    ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
    ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
    imagePullSecrets:
    #  - name: "regcred"
    heapOptions: "-Xms512M -Xmx512M"
    persistence:
      enabled: false
    resources: {}
    ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
    ## and remove the curly braces after 'resources:'
    #  limits:
    #    cpu: 100m
    #    memory: 128Mi
    #  requests:
    #    cpu: 100m
    #    memory: 128Mi
    configurationOverrides:
      #offsets.topic.replication.factor: 1
      "message.max.bytes": "134217728" # 128 MB
      "replica.fetch.max.bytes": "134217728" # 128 MB
      #default.replication.factor: 1
      # "min.insync.replicas": 2
      # "auto.create.topics.enable": false
      "log.retention.ms": "10000" # 10s
      "metrics.sample.window.ms": "5000" # 5s
      "advertised.listeners": |-
        EXTERNAL://${HOST_IP}:$((31090 + ${KAFKA_BROKER_ID}))
      "listener.security.protocol.map": |-
        PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT
    nodeport:
      enabled: false
      servicePort: 19092
      firstListenerPort: 31090

  ## ------------------------------------------------------
  ## Schema Registry
  ## ------------------------------------------------------
  cp-schema-registry:
    enabled: true
    image: confluentinc/cp-schema-registry
    imageTag: 5.4.0
    ## Optionally specify an array of imagePullSecrets. Secrets must be manually created in the namespace.
    ## https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
    imagePullSecrets:
    #  - name: "regcred"
    heapOptions: "-Xms512M -Xmx512M"
    resources: {}
    external:
      enabled: true
      type: NodePort
      servicePort: 8081
      nodePort: 30099 # previously 31091, which conflicted with Kafka
      annotations: {}
    ## If you do want to specify resources, uncomment the following lines, adjust them as necessary,
    ## and remove the curly braces after 'resources:'
    #  limits:
    #    cpu: 100m
    #    memory: 128Mi
    #  requests:
    #    cpu: 100m
    #    memory: 128Mi

  cp-kafka-rest:
    enabled: false

  cp-kafka-connect:
    enabled: false

  cp-ksql-server:
    enabled: false

  cp-control-center:
    enabled: false

###
# Kafka Lag Exporter
###
kafka-lag-exporter:
  enabled: true
  clusters:
    - name: "my-confluent-cp-kafka"
      bootstrapBrokers: "my-confluent-cp-kafka:9092"
  ## The interval between refreshing metrics
  pollIntervalSeconds: 15
  prometheus:
    serviceMonitor:
      enabled: true
      interval: "5s"
      additionalLabels:
        appScope: titan-ccp

###
# Prometheus Monitoring Stack (Prometheus Operator)
###
kube-prometheus-stack:
  commonLabels:
    appScope: titan-ccp
  alertmanager:
    enabled: false
  grafana:
    enabled: false
  kubeApiServer:
    enabled: false
  kubelet:
    enabled: false
  kubeControllerManager:
    enabled: false
  coreDns:
    enabled: false
  kubeDns:
    enabled: false
  kubeEtcd:
    enabled: false
  kubeScheduler:
    enabled: false
  kubeProxy:
    enabled: false
  kubeStateMetrics:
    enabled: false
  nodeExporter:
    enabled: false
  prometheusOperator:
    enabled: true
  prometheus:
    enabled: false

###
# Prometheus
###
prometheus:
  enabled: true
  # Depending on your cluster's security and permission settings, you may need to create the following resources
  serviceAccount:
    enabled: true
  clusterRole:
    enabled: true
  clusterRoleBinding:
    enabled: true
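
###
# Example overrides (sketch)
###
## A minimal sketch of how these defaults could be overridden at install time. The release
## name "theodolite" and the chart path "." are placeholders, not part of this chart:
##
##   helm install theodolite . --set cp-helm-charts.cp-kafka.brokers=3 --set grafana.enabled=false
##
## Equivalently, overrides can be kept in a separate file (a hypothetical values-minimal.yaml)
## and passed with -f:
##
##   cp-helm-charts:
##     cp-kafka:
##       brokers: 3
##   grafana:
##     enabled: false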