Commit 8b8d2b71 authored by Sören Henning

Remove DEBUG level for metrics

parent f9792191
1 merge request: !6 Add Distributed Workload Generator
Pipeline #386 failed
This commit is part of merge request !6.
@@ -84,7 +84,7 @@ public class KafkaStreamsBuilder {
         .set(StreamsConfig.NUM_STREAM_THREADS_CONFIG, this.numThreads, p -> p > 0)
         .set(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, this.commitIntervalMs, p -> p >= 0)
         .set(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, this.cacheMaxBytesBuff, p -> p >= 0)
-        .set(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, "DEBUG")
+        // .set(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, "DEBUG")
         .build();
     return new KafkaStreams(topologyBuilder.build(), properties);
   }
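With the hard-coded DEBUG level commented out, Kafka Streams falls back to its default metrics recording level, INFO. If DEBUG-level metrics are still wanted for individual benchmark runs, the level could be supplied from the environment instead of being fixed in code. The following is a minimal sketch using plain Kafka Streams Properties rather than the project's own builder (whose implementation is not shown in this diff); the METRICS_LEVEL environment variable and the class name are illustrative assumptions.

import java.util.Properties;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;

public final class MetricsLevelExample {

  private MetricsLevelExample() {}

  public static KafkaStreams buildStreams(final Topology topology, final String bootstrapServers) {
    final Properties properties = new Properties();
    properties.put(StreamsConfig.APPLICATION_ID_CONFIG, "workload-generator-example");
    properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    // Read the metrics recording level from the environment and fall back to Kafka's
    // default (INFO) instead of hard-coding DEBUG. METRICS_LEVEL is a hypothetical name.
    final String metricsLevel = System.getenv().getOrDefault("METRICS_LEVEL", "INFO");
    properties.put(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, metricsLevel);
    return new KafkaStreams(topology, properties);
  }
}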
@@ -47,8 +47,8 @@ public class KafkaStreamsBuilder {
   }

   /**
-   * Sets the Kafka Streams property for the number of threads
-   * (num.stream.threads). Can be minus one for using the default.
+   * Sets the Kafka Streams property for the number of threads (num.stream.threads). Can be minus
+   * one for using the default.
    */
   public KafkaStreamsBuilder numThreads(final int numThreads) {
     if (numThreads < -1 || numThreads == 0) {
@@ -59,10 +59,9 @@ public class KafkaStreamsBuilder {
   }

   /**
-   * Sets the Kafka Streams property for the frequency with which to save the
-   * position (offsets in source topics) of tasks (commit.interval.ms). Must be
-   * zero for processing all record, for example, when processing bulks of
-   * records. Can be minus one for using the default.
+   * Sets the Kafka Streams property for the frequency with which to save the position (offsets in
+   * source topics) of tasks (commit.interval.ms). Must be zero for processing all record, for
+   * example, when processing bulks of records. Can be minus one for using the default.
    */
   public KafkaStreamsBuilder commitIntervalMs(final int commitIntervalMs) {
     if (commitIntervalMs < -1) {
@@ -73,10 +72,9 @@ public class KafkaStreamsBuilder {
   }

   /**
-   * Sets the Kafka Streams property for maximum number of memory bytes to be used
-   * for record caches across all threads (cache.max.bytes.buffering). Must be
-   * zero for processing all record, for example, when processing bulks of
-   * records. Can be minus one for using the default.
+   * Sets the Kafka Streams property for maximum number of memory bytes to be used for record caches
+   * across all threads (cache.max.bytes.buffering). Must be zero for processing all record, for
+   * example, when processing bulks of records. Can be minus one for using the default.
    */
   public KafkaStreamsBuilder cacheMaxBytesBuffering(final int cacheMaxBytesBuffering) {
     if (cacheMaxBytesBuffering < -1) {
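The three setters above share one convention: a value of -1 leaves the Kafka Streams default in place, while zero for commit.interval.ms and cache.max.bytes.buffering disables buffering so records are forwarded immediately. A hedged usage sketch follows; the fluent setters come from the diff, but the constructor, topology wiring, and a build() method returning a KafkaStreams instance are assumptions based on the visible "return new KafkaStreams(...)" statement.

// Usage sketch under the assumptions stated above; not the repository's exact API.
final KafkaStreams streams = new KafkaStreamsBuilder() // constructor/topology wiring assumed
    .numThreads(4)             // num.stream.threads: must be > 0, or -1 for the Kafka default
    .commitIntervalMs(0)       // commit.interval.ms: 0 commits the position after each processed record
    .cacheMaxBytesBuffering(0) // cache.max.bytes.buffering: 0 disables record caching
    .build();
streams.start();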
@@ -99,7 +97,8 @@ public class KafkaStreamsBuilder {
         .set(StreamsConfig.NUM_STREAM_THREADS_CONFIG, this.numThreads, p -> p > 0)
         .set(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, this.commitIntervalMs, p -> p >= 0)
         .set(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, this.cacheMaxBytesBuff, p -> p >= 0)
-        .set(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, "DEBUG").build();
+        // .set(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, "DEBUG")
+        .build();
     return new KafkaStreams(topologyBuilder.build(), properties);
   }
@@ -109,7 +109,8 @@ public class KafkaStreamsBuilder {
         .set(StreamsConfig.NUM_STREAM_THREADS_CONFIG, this.numThreads, p -> p > 0)
         .set(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, this.commitIntervalMs, p -> p >= 0)
         .set(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG, this.cacheMaxBytesBuff, p -> p >= 0)
-        .set(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, "DEBUG").build();
+        // .set(StreamsConfig.METRICS_RECORDING_LEVEL_CONFIG, "DEBUG")
+        .build();
     return new KafkaStreams(topologyBuilder.build(), properties);
   }
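The .set(key, value, predicate) calls in these hunks suggest a small properties builder that only applies a value when the predicate accepts it, which is how the -1 sentinel falls through to the Kafka Streams default. That builder's implementation is not part of this diff; the following is a minimal sketch of the pattern under that assumption, with an illustrative class name.

import java.util.Properties;
import java.util.function.Predicate;

// Minimal sketch of a conditional properties builder as implied by the
// .set(key, value, predicate) calls above; the repository's real builder may differ.
public final class ConditionalPropertiesBuilder {

  private final Properties properties = new Properties();

  public <T> ConditionalPropertiesBuilder set(final String key, final T value,
      final Predicate<T> condition) {
    // Apply the property only if the value passes the predicate (e.g. p -> p >= 0),
    // so sentinel values such as -1 leave the Kafka Streams default untouched.
    if (condition.test(value)) {
      this.properties.put(key, value);
    }
    return this;
  }

  public Properties build() {
    return this.properties;
  }
}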