Skip to content
Snippets Groups Projects
Commit ed7cc2c8 authored by Sören Henning's avatar Sören Henning
Browse files

Merge branch 'master' into hazelcastjet-she

parents 4702e63d 37e170d8
Branches
Tags
1 merge request!208Add benchmark implementations for Hazelcast Jet
Pipeline #6761 canceled
Showing
with 356 additions and 69 deletions
......@@ -8,7 +8,7 @@ authors:
given-names: Wilhelm
orcid: "https://orcid.org/0000-0001-6625-4335"
title: Theodolite
version: "0.6.3"
version: "0.6.4"
repository-code: "https://github.com/cau-se/theodolite"
license: "Apache-2.0"
doi: "10.1016/j.bdr.2021.100209"
......
......@@ -8,7 +8,7 @@
"dateModified": "2022-01-24",
"downloadUrl": "https://github.com/cau-se/theodolite/releases",
"name": "Theodolite",
"version": "0.6.3",
"version": "0.6.4",
"description": "Theodolite is a framework for benchmarking the horizontal and vertical scalability of cloud-native applications.",
"developmentStatus": "active",
"relatedLink": [
......
......@@ -42,7 +42,7 @@ spec:
properties:
loadGenMaxRecords: "150000"
kafkaConfig:
bootstrapServer: "theodolite-cp-kafka:9092"
bootstrapServer: "theodolite-kafka-kafka-bootstrap:9092"
topics:
- name: "input"
numPartitions: 40
......@@ -54,7 +54,7 @@ spec:
## System under Test (SUT), Load Generator and Infrastructure
In Thedolite, the system under test (SUT), the load generator as well as additional infrastructure (e.g., a middleware) are described by Kubernetes resources files.
In Theodolite, the system under test (SUT), the load generator as well as additional infrastructure (e.g., a middleware) are described by Kubernetes resource files.
All resources defined for the SUT and the load generator are started and stopped for each SLO experiment, with SUT resources being started before the load generator.
Infrastructure resources live over the entire duration of a benchmark run. They avoid time-consuming recreation of software components like middlewares, but should be used with caution to not let previous SLO experiments influence later ones.
......
apiVersion: v1
entries:
theodolite:
- apiVersion: v2
appVersion: 0.6.4
created: "2022-02-16T16:09:11.967649304+01:00"
dependencies:
- condition: grafana.enabled
name: grafana
repository: https://grafana.github.io/helm-charts
version: 6.17.5
- condition: kube-prometheus-stack.enabled
name: kube-prometheus-stack
repository: https://prometheus-community.github.io/helm-charts
version: 20.0.1
- condition: cp-helm-charts.enabled
name: cp-helm-charts
repository: https://soerenhenning.github.io/cp-helm-charts
version: 0.6.0
- condition: kafka-lag-exporter.enabled
name: kafka-lag-exporter
repository: https://seanglover.com/kafka-lag-exporter/repo
version: 0.6.7
description: Theodolite is a framework for benchmarking the horizontal and vertical
scalability of cloud-native applications.
digest: 10156d9917233ffa297aab093532038667d25b2babb2b2058a0a32e1dccb0cca
home: https://www.theodolite.rocks
maintainers:
- email: soeren.henning@email.uni-kiel.de
name: Sören Henning
url: https://www.se.informatik.uni-kiel.de/en/team/soeren-henning-m-sc
name: theodolite
sources:
- https://github.com/cau-se/theodolite
type: application
urls:
- https://github.com/cau-se/theodolite/releases/download/v0.6.4/theodolite-0.6.4.tgz
version: 0.6.4
- apiVersion: v2
appVersion: 0.6.3
created: "2022-01-24T13:40:40.07330713+01:00"
......@@ -141,6 +176,41 @@ entries:
urls:
- https://github.com/cau-se/theodolite/releases/download/v0.6.0/theodolite-0.6.0.tgz
version: 0.6.0
- apiVersion: v2
appVersion: 0.5.2
created: "2022-02-16T15:43:43.534374597+01:00"
dependencies:
- condition: grafana.enabled
name: grafana
repository: https://grafana.github.io/helm-charts
version: 6.17.5
- condition: kube-prometheus-stack.enabled
name: kube-prometheus-stack
repository: https://prometheus-community.github.io/helm-charts
version: 12.0.0
- condition: cp-helm-charts.enabled
name: cp-helm-charts
repository: https://soerenhenning.github.io/cp-helm-charts
version: 0.6.0
- condition: kafka-lag-exporter.enabled
name: kafka-lag-exporter
repository: https://seanglover.com/kafka-lag-exporter/repo
version: 0.6.6
description: Theodolite is a framework for benchmarking the scalability stream
processing engines.
digest: 72df752883d2161fdfc0e96bb90fe11f9c0ed4f71013e588ec170f2cbb178e9c
home: https://cau-se.github.io/theodolite
maintainers:
- email: soeren.henning@email.uni-kiel.de
name: Sören Henning
url: https://www.se.informatik.uni-kiel.de/en/team/soeren-henning-m-sc
name: theodolite
sources:
- https://github.com/cau-se/theodolite
type: application
urls:
- https://github.com/cau-se/theodolite/releases/download/v0.5.2/theodolite-0.5.2.tgz
version: 0.5.2
- apiVersion: v2
appVersion: 0.5.1
created: "2021-11-12T16:15:01.629937292+01:00"
......@@ -246,4 +316,4 @@ entries:
urls:
- https://github.com/cau-se/theodolite/releases/download/v0.4.0/theodolite-0.4.0.tgz
version: 0.4.0
generated: "2022-01-24T13:40:40.036786105+01:00"
generated: "2022-02-16T16:09:11.93111234+01:00"
......@@ -58,13 +58,13 @@ In cases, where you need to install multiple Theodolite instances, it's best to
### Installation with a release name other than `theodolite`
When using another release name than `theodolite`, make sure to adjust the Kafka Lag Exporter configuration of you `values.yaml` accordingly:
When using another release name than `theodolite`, make sure to adjust the Confluent Schema Registry configuration of your `values.yaml` accordingly:
```yaml
kafka-lag-exporter:
clusters:
- name: "<your-release-name>-cp-kafka"
bootstrapBrokers: "<your-release-name>-cp-kafka:9092"
cp-helm-charts:
cp-schema-registry:
kafka:
bootstrapServers: <your-release-name>-kafka-kafka-bootstrap:9092
```
This seems unfortunately to be necessary as Helm does not let us inject values into dependency charts.
......
......@@ -47,7 +47,7 @@ The prebuilt container images can be configured with the following environment v
| `PORT` | Port used for coordination among load generator instances. | 5701 |
| `PORT_AUTO_INCREMENT` | If set to true and the specified PORT is already used, use the next higher one. Useful if multiple instances should run on the same host, without configuring each instance individually. | true |
| `CLUSTER_NAME_PREFIX` | Only required if unrelated load generators form a cluster. | theodolite-load-generation |
| `TARGET` | The target system the load generator send messages to. Valid values are: `kafka`, `http`. | `kafka` |
| `TARGET` | The target system the load generator sends messages to. Valid values are: `kafka`, `http` and `pubsub`. | `kafka` |
| `KAFKA_BOOTSTRAP_SERVERS` | A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. See [Kafka producer config: `bootstrap.servers`](https://kafka.apache.org/documentation/#producerconfigs_bootstrap.servers) for more information. Only used if Kafka is set as `TARGET`. | `localhost:9092` |
| `KAFKA_INPUT_TOPIC` | Name of the Kafka topic, which should receive the generated messages. Only used if Kafka is set as `TARGET`. | input |
| `SCHEMA_REGISTRY_URL` | URL of the [Confluent Schema Registry](https://docs.confluent.io/platform/current/schema-registry). | `http://localhost:8081` |
......@@ -55,6 +55,9 @@ The prebuilt container images can be configured with the following environment v
| `KAFKA_LINGER_MS` | Value for the Kafka producer configuration: [`linger.ms`](https://kafka.apache.org/documentation/#producerconfigs_linger.ms). Only used if Kafka is set as `TARGET`. | see Kafka producer config: [`linger.ms`](https://kafka.apache.org/documentation/#producerconfigs_linger.ms) |
| `KAFKA_BUFFER_MEMORY` | Value for the Kafka producer configuration: [`buffer.memory`](https://kafka.apache.org/documentation/#producerconfigs_buffer.memory) Only used if Kafka is set as `TARGET`. | see Kafka producer config: [`buffer.memory`](https://kafka.apache.org/documentation/#producerconfigs_buffer.memory) |
| `HTTP_URL` | The URL the load generator should post messages to. Only used if HTTP is set as `TARGET`. | |
| `PUBSUB_INPUT_TOPIC` | The Google Cloud Pub/Sub topic to write messages to. Only used if Pub/Sub is set as `TARGET`. | input |
| `PUBSUB_PROJECT` | The Google Cloud project this Pub/Sub topic is associated with. Only used if Pub/Sub is set as `TARGET`. | |
| `PUBSUB_EMULATOR_HOST` | A Pub/Sub emulator host. Only used if Pub/Sub is set as `TARGET`. | |
| `NUM_SENSORS` | The amount of simulated sensors. | 10 |
| `PERIOD_MS` | The time in milliseconds between generating two messages for the same sensor. With our Theodolite benchmarks, we apply an [open workload model](https://www.usenix.org/legacy/event/nsdi06/tech/full_papers/schroeder/schroeder.pdf) in which new messages are generated at a fixed rate, without considering the think time of the target server nor the time required for generating a message. | 1000 |
| `VALUE` | The constant `valueInW` of an `ActivePowerRecord`. | 10 |
......@@ -64,10 +67,10 @@ Please note that there are some additional configuration options for benchmark [
## Creating a custom load generator
To create a custom load generator, you need to import the [load-generator-commons](https://github.com/cau-se/theodolite/tree/master/theodolite-benchmarks/load-generator-commons) project. You can then create an instance of the `LoadGenerator` object and call its `run` method:
To create a custom load generator, you need to import the [load-generator-commons](https://github.com/cau-se/theodolite/tree/master/theodolite-benchmarks/load-generator-commons) project. You can then create an instance of the `LoadGenerator` populated with a default configuration, adjust it as desired, and start it by calling its `run` method:
```java
LoadGenerator loadGenerator = new LoadGenerator()
LoadGenerator loadGenerator = LoadGenerator.fromDefaults()
.setClusterConfig(clusterConfig)
.setLoadDefinition(new WorkloadDefinition(
new KeySpace(key_prefix, numSensors),
......@@ -79,9 +82,8 @@ LoadGenerator loadGenerator = new LoadGenerator()
loadGenerator.run();
```
Alternatively, you can also start with a load generator populated with a default configuration or created from environment variables and then adjust the `LoadGenerator` as desired:
Alternatively, you can also start with a `LoadGenerator` created from environment variables and, optionally, adjust it as desired:
```java
LoadGenerator loadGeneratorFromDefaults = LoadGenerator.fromDefaults()
LoadGenerator loadGeneratorFromEnv = LoadGenerator.fromEnvironment();
LoadGenerator loadGenerator = LoadGenerator.fromEnvironment();
```
......@@ -24,10 +24,11 @@ dependencies:
version: 0.6.0
repository: https://soerenhenning.github.io/cp-helm-charts
condition: cp-helm-charts.enabled
- name: kafka-lag-exporter
version: 0.6.7
repository: https://lightbend.github.io/kafka-lag-exporter/repo/
condition: kafka-lag-exporter.enabled
- name: strimzi-kafka-operator
version: 0.28.0
repository: https://strimzi.io/charts/
condition: strimzi.enabled
version: 0.7.0-SNAPSHOT
......
......@@ -71,10 +71,10 @@ data:
"steppedLine": false,
"targets": [
{
"expr": "sum(cp_kafka_server_brokertopicmetrics_messagesinpersec_topic_input)",
"expr": "sum by (topic) (rate(kafka_server_brokertopicmetrics_messagesin_total{topic='input'}[30s]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{Messages In Per Second}}",
"legendFormat": "{{topic}}",
"refId": "D"
}
],
......@@ -162,10 +162,10 @@ data:
"steppedLine": false,
"targets": [
{
"expr": "sum(cp_kafka_server_brokertopicmetrics_messagesinpersec_topic_output)",
"expr": "sum by (topic) (rate(kafka_server_brokertopicmetrics_messagesin_total{topic='output'}[30s]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{Messages Out Per Second}}",
"legendFormat": "{{topic}}",
"refId": "D"
}
],
......@@ -253,7 +253,7 @@ data:
"steppedLine": false,
"targets": [
{
"expr": "sum by(group, topic) (kafka_consumergroup_group_lag >= 0)",
"expr": "sum by(consumergroup, topic) (kafka_consumergroup_lag >= 0)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{topic}}",
......@@ -344,10 +344,10 @@ data:
"steppedLine": false,
"targets": [
{
"expr": "count(count (kafka_consumer_consumer_fetch_manager_metrics_records_lag) by(pod))",
"expr": "sum by(consumergroup) (kafka_consumergroup_members >= 0)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "instances",
"legendFormat": "{{consumergroup}}",
"refId": "D"
}
],
......@@ -436,7 +436,7 @@ data:
"steppedLine": false,
"targets": [
{
"expr": "sum by(group,topic) (kafka_consumergroup_group_offset >= 0)",
"expr": "sum by(consumergroup,topic) (kafka_consumergroup_current_offset{topic='input'} >= 0)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{topic}}",
......@@ -527,7 +527,7 @@ data:
"steppedLine": false,
"targets": [
{
"expr": "count by(group,topic) (kafka_consumergroup_group_offset >= 0)",
"expr": "sum by(topic) (kafka_topic_partitions >= 0)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{topic}}",
......@@ -618,7 +618,7 @@ data:
"steppedLine": false,
"targets": [
{
"expr": "sum by(group,topic) (kafka_partition_latest_offset)",
"expr": "sum by(topic) (kafka_topic_partition_current_offset)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "{{topic}}",
......
......@@ -2,7 +2,6 @@
apiVersion: v1
kind: Pod
metadata:
# name: {{ template "theodolite.fullname" . }}-kafka-client
name: {{ template "theodolite.fullname" . }}-kafka-client
spec:
containers:
......@@ -12,6 +11,9 @@ spec:
- sh
- -c
- "exec tail -f /dev/null"
env:
- name: BOOTSTRAP_SERVER
value: {{ template "theodolite.fullname" . }}-kafka-kafka-bootstrap:9092
{{- with .Values.kafkaClient.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
......
......@@ -5,11 +5,12 @@ metadata:
name: {{ template "theodolite.fullname" . }}-prometheus
spec:
serviceAccountName: {{ template "theodolite.fullname" . }}-prometheus
podMonitorSelector: {}
serviceMonitorSelector: {}
resources:
requests:
memory: 400Mi
#scrapeInterval: 1s
scrapeInterval: 15s
enableAdminAPI: true
{{- with .Values.prometheus.nodeSelector }}
nodeSelector:
......
{{- if .Values.strimzi.enabled -}}
apiVersion: kafka.strimzi.io/v1beta2
kind: Kafka
metadata:
name: {{ template "theodolite.fullname" . }}-kafka
spec:
kafka:
jmxOptions: {}
{{- with .Values.strimzi.kafka.listeners }}
listeners:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.strimzi.kafka.replicas }}
replicas:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.strimzi.kafka.config }}
config:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.strimzi.kafka.jvmOptions }}
jvmOptions:
{{- toYaml . | nindent 6 }}
{{- end }}
storage:
type: ephemeral
metricsConfig:
type: jmxPrometheusExporter
valueFrom:
configMapKeyRef:
name: {{ template "theodolite.fullname" . }}-kafka-metrics
key: kafka-metrics-config.yml
kafkaExporter: {}
zookeeper:
{{- with .Values.strimzi.zookeeper.replicas }}
replicas:
{{- toYaml . | nindent 6 }}
{{- end }}
storage:
type: ephemeral
{{- end }}
\ No newline at end of file
{{- if .Values.strimzi.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ template "theodolite.fullname" . }}-kafka-exporter-podmonitor
labels:
app: theodolite
spec:
selector:
matchLabels:
strimzi.io/name: {{ template "theodolite.fullname" . }}-kafka-kafka-exporter
podMetricsEndpoints:
- path: /metrics
port: tcp-prometheus
{{- end }}
\ No newline at end of file
{{- if .Values.strimzi.enabled -}}
kind: ConfigMap
apiVersion: v1
metadata:
name: {{ template "theodolite.fullname" . }}-kafka-metrics
labels:
app: strimzi
data:
kafka-metrics-config.yml: |
# See https://github.com/prometheus/jmx_exporter for more info about JMX Prometheus Exporter metrics
lowercaseOutputName: true
rules:
# Special cases and very specific rules
- pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), topic=(.+), partition=(.*)><>Value
name: kafka_server_$1_$2
type: GAUGE
labels:
clientId: "$3"
topic: "$4"
partition: "$5"
- pattern: kafka.server<type=(.+), name=(.+), clientId=(.+), brokerHost=(.+), brokerPort=(.+)><>Value
name: kafka_server_$1_$2
type: GAUGE
labels:
clientId: "$3"
broker: "$4:$5"
- pattern: kafka.server<type=(.+), cipher=(.+), protocol=(.+), listener=(.+), networkProcessor=(.+)><>connections
name: kafka_server_$1_connections_tls_info
type: GAUGE
labels:
cipher: "$2"
protocol: "$3"
listener: "$4"
networkProcessor: "$5"
- pattern: kafka.server<type=(.+), clientSoftwareName=(.+), clientSoftwareVersion=(.+), listener=(.+), networkProcessor=(.+)><>connections
name: kafka_server_$1_connections_software
type: GAUGE
labels:
clientSoftwareName: "$2"
clientSoftwareVersion: "$3"
listener: "$4"
networkProcessor: "$5"
- pattern: "kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+):"
name: kafka_server_$1_$4
type: GAUGE
labels:
listener: "$2"
networkProcessor: "$3"
- pattern: kafka.server<type=(.+), listener=(.+), networkProcessor=(.+)><>(.+)
name: kafka_server_$1_$4
type: GAUGE
labels:
listener: "$2"
networkProcessor: "$3"
# Some percent metrics use MeanRate attribute
# Ex) kafka.server<type=(KafkaRequestHandlerPool), name=(RequestHandlerAvgIdlePercent)><>MeanRate
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>MeanRate
name: kafka_$1_$2_$3_percent
type: GAUGE
# Generic gauges for percents
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*><>Value
name: kafka_$1_$2_$3_percent
type: GAUGE
- pattern: kafka.(\w+)<type=(.+), name=(.+)Percent\w*, (.+)=(.+)><>Value
name: kafka_$1_$2_$3_percent
type: GAUGE
labels:
"$4": "$5"
# Generic per-second counters with 0-2 key/value pairs
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+), (.+)=(.+)><>Count
name: kafka_$1_$2_$3_total
type: COUNTER
labels:
"$4": "$5"
"$6": "$7"
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*, (.+)=(.+)><>Count
name: kafka_$1_$2_$3_total
type: COUNTER
labels:
"$4": "$5"
- pattern: kafka.(\w+)<type=(.+), name=(.+)PerSec\w*><>Count
name: kafka_$1_$2_$3_total
type: COUNTER
# Generic gauges with 0-2 key/value pairs
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Value
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
"$6": "$7"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Value
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>Value
name: kafka_$1_$2_$3
type: GAUGE
# Emulate Prometheus 'Summary' metrics for the exported 'Histogram's.
# Note that these are missing the '_sum' metric!
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+), (.+)=(.+)><>Count
name: kafka_$1_$2_$3_count
type: COUNTER
labels:
"$4": "$5"
"$6": "$7"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*), (.+)=(.+)><>(\d+)thPercentile
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
"$6": "$7"
quantile: "0.$8"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.+)><>Count
name: kafka_$1_$2_$3_count
type: COUNTER
labels:
"$4": "$5"
- pattern: kafka.(\w+)<type=(.+), name=(.+), (.+)=(.*)><>(\d+)thPercentile
name: kafka_$1_$2_$3
type: GAUGE
labels:
"$4": "$5"
quantile: "0.$6"
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>Count
name: kafka_$1_$2_$3_count
type: COUNTER
- pattern: kafka.(\w+)<type=(.+), name=(.+)><>(\d+)thPercentile
name: kafka_$1_$2_$3
type: GAUGE
labels:
quantile: "0.$4"
{{- end }}
\ No newline at end of file
{{- if .Values.strimzi.enabled -}}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: {{ template "theodolite.fullname" . }}-kafka-resources-metrics
labels:
app: theodolite
spec:
selector:
matchLabels:
strimzi.io/name: {{ template "theodolite.fullname" . }}-kafka-kafka
podMetricsEndpoints:
- path: /metrics
port: tcp-prometheus
{{- end }}
\ No newline at end of file
......@@ -38,6 +38,7 @@ rules:
- monitoring.coreos.com
resources:
- servicemonitors
- podmonitors
verbs:
- update
- delete
......
......@@ -64,7 +64,7 @@ cp-helm-charts:
## Zookeeper
## ------------------------------------------------------
cp-zookeeper:
enabled: true
enabled: false
nodeSelector: {}
servers: 3
image: confluentinc/cp-zookeeper
......@@ -81,7 +81,7 @@ cp-helm-charts:
## Kafka
## ------------------------------------------------------
cp-kafka:
enabled: true
enabled: false
nodeSelector: {}
brokers: 3
image: confluentinc/cp-enterprise-kafka
......@@ -137,6 +137,9 @@ cp-helm-charts:
nodePort: 30099
annotations: {}
kafka:
bootstrapServers: theodolite-kafka-kafka-bootstrap:9092
cp-kafka-rest:
enabled: false
......@@ -149,29 +152,30 @@ cp-helm-charts:
cp-control-center:
enabled: false
###
# Kafka Lag Exporter
###
kafka-lag-exporter:
enabled: true
image:
pullPolicy: IfNotPresent
nodeSelector: {}
clusters:
- name: "theodolite-cp-kafka"
bootstrapBrokers: "theodolite-cp-kafka:9092"
## The interval between refreshing metrics
pollIntervalSeconds: 15
prometheus:
serviceMonitor:
strimzi:
enabled: true
interval: "5s"
additionalLabels:
appScope: titan-ccp
kafka:
listeners:
- name: plain
port: 9092
type: internal
tls: false
replicas: 3
config:
"message.max.bytes": "134217728" # 128 MB
"replica.fetch.max.bytes": "134217728" #128 MB
"auto.create.topics.enable": false
"log.retention.ms": "7200000" # 2h
"metrics.sample.window.ms": "5000" #5s
jvmOptions:
"-Xmx": "512M"
"-Xms": "512M"
zookeeper:
replicas: 3
###
......
......@@ -24,7 +24,7 @@ elif os.getenv('LOG_LEVEL') == 'DEBUG':
def calculate_slope_trend(results, warmup):
d = []
for result in results:
group = result['metric']['group']
group = result['metric']['consumergroup']
for value in result['values']:
d.append({'group': group, 'timestamp': int(
value[0]), 'value': int(value[1]) if value[1] != 'NaN' else 0})
......
......@@ -3,7 +3,7 @@
[
{
"metric": {
"group": "theodolite-uc1-application-0.0.1"
"consumergroup": "theodolite-uc1-application-0.0.1"
},
"values": [
[
......
......@@ -3,7 +3,7 @@
[
{
"metric": {
"group": "theodolite-uc1-application-0.0.1"
"consumergroup": "theodolite-uc1-application-0.0.1"
},
"values": [
[
......@@ -100,7 +100,7 @@
[
{
"metric": {
"group": "theodolite-uc1-application-0.0.1"
"consumergroup": "theodolite-uc1-application-0.0.1"
},
"values": [
[
......@@ -193,7 +193,7 @@
[
{
"metric": {
"group": "theodolite-uc1-application-0.0.1"
"consumergroup": "theodolite-uc1-application-0.0.1"
},
"values": [
[
......
......@@ -8,8 +8,8 @@ import org.slf4j.LoggerFactory;
import titan.ccp.common.configuration.ServiceConfigurations;
/**
* Abstraction of a Beam microservice.
* Encapsulates the corresponding {@link PipelineOptions} and the beam Runner.
* Abstraction of a Beam microservice. Encapsulates the corresponding {@link PipelineOptions} and
* the beam Runner.
*/
public class AbstractBeamService {
......@@ -20,9 +20,7 @@ public class AbstractBeamService {
// Application Configurations
private final Configuration config = ServiceConfigurations.createWithDefaults();
private final String applicationName =
config.getString(ConfigurationKeys.APPLICATION_NAME);
private final String applicationName = this.config.getString(ConfigurationKeys.APPLICATION_NAME);
/**
* Creates AbstractBeamService with options.
......@@ -33,13 +31,13 @@ public class AbstractBeamService {
for (final String s : args) {
LOGGER.info("{}", s);
}
options = PipelineOptionsFactory.fromArgs(args).create();
options.setJobName(applicationName);
LOGGER.info("Starting BeamService with PipelineOptions {}:", this.options.toString());
this.options = PipelineOptionsFactory.fromArgs(args).create();
this.options.setJobName(this.applicationName);
LOGGER.info("Starting BeamService with PipelineOptions: {}", this.options.toString());
}
public Configuration getConfig() {
return config;
return this.config;
}
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please to comment