Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion kafka-bom/build.gradle.kts
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,6 @@ dependencies {
api("org.apache.kafka:kafka-clients:$confluentCcsVersion")
api("org.apache.kafka:kafka-streams:$confluentCcsVersion")
api("org.apache.kafka:kafka-streams-test-utils:$confluentCcsVersion")
api("org.apache.avro:avro:1.12.0")
api("org.apache.avro:avro:1.12.1")
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import static org.apache.kafka.clients.consumer.ConsumerConfig.AUTO_OFFSET_RESET_CONFIG;
import static org.apache.kafka.clients.consumer.ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG;
import static org.apache.kafka.clients.consumer.ConsumerConfig.FETCH_MIN_BYTES_CONFIG;
import static org.apache.kafka.clients.consumer.ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG;
import static org.apache.kafka.clients.producer.ProducerConfig.ACKS_CONFIG;
import static org.apache.kafka.clients.producer.ProducerConfig.BATCH_SIZE_CONFIG;
import static org.apache.kafka.clients.producer.ProducerConfig.COMPRESSION_TYPE_CONFIG;
Expand All @@ -19,6 +20,7 @@
import static org.apache.kafka.streams.StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG;
import static org.apache.kafka.streams.StreamsConfig.TOPOLOGY_OPTIMIZATION;
import static org.apache.kafka.streams.StreamsConfig.consumerPrefix;
import static org.apache.kafka.streams.StreamsConfig.mainConsumerPrefix;
import static org.apache.kafka.streams.StreamsConfig.producerPrefix;
import static org.apache.kafka.streams.StreamsConfig.topicPrefix;

Expand Down Expand Up @@ -46,6 +48,7 @@
import org.apache.kafka.streams.kstream.KStream;
import org.hypertrace.core.grpcutils.client.GrpcChannelRegistry;
import org.hypertrace.core.grpcutils.client.GrpcRegistryConfig;
import org.hypertrace.core.kafkastreams.framework.interceptors.metrics.MetricsInterceptor;
import org.hypertrace.core.kafkastreams.framework.listeners.LoggingStateListener;
import org.hypertrace.core.kafkastreams.framework.listeners.LoggingStateRestoreListener;
import org.hypertrace.core.kafkastreams.framework.rocksdb.BoundedMemoryConfigSetter;
Expand Down Expand Up @@ -102,6 +105,7 @@ protected void doInit() {
streamsConfig = mergeProperties(getBaseStreamsConfig(), getJobStreamsConfig(getAppConfig()));
// build topologies
Map<String, KStream<?, ?>> sourceStreams = new HashMap<>();

StreamsBuilder streamsBuilder = new StreamsBuilder();
streamsBuilder = buildTopology(streamsConfig, streamsBuilder, sourceStreams);
this.topology = streamsBuilder.build();
Expand Down Expand Up @@ -249,6 +253,11 @@ public Map<String, Object> getBaseStreamsConfig() {
// ##########################
baseStreamsConfig.put(topicPrefix(RETENTION_MS_CONFIG), TimeUnit.HOURS.toMillis(12));

// set metrics interceptor, only apply to the main consumer and skip instrumenting the restore
// consumer
baseStreamsConfig.put(
mainConsumerPrefix(INTERCEPTOR_CLASSES_CONFIG), MetricsInterceptor.class.getName());

return baseStreamsConfig;
}

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
package org.hypertrace.core.kafkastreams.framework.interceptors.metrics;

import io.micrometer.core.instrument.Counter;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.hypertrace.core.serviceframework.metrics.PlatformMetricsRegistry;

/**
 * Consumer interceptor that publishes per-record consumption metrics to the platform metrics
 * registry: a count of consumed records and the accumulated time lag (wall-clock time minus
 * record timestamp, summed across records).
 *
 * <p>Kafka guarantees {@link #configure(Map)} is invoked before any {@link #onConsume} call, so
 * the counters are always initialized before use.
 */
public class MetricsInterceptor implements ConsumerInterceptor<Object, Object> {
  private static final String TIME_LAG_COUNTER_NAME = "kafka_records_time_lag";
  private static final String NUM_RECORDS_COUNTER_NAME = "kafka_records_count";

  // Initialized in configure(); Kafka calls configure() before onConsume().
  private Counter timeLagCounter;
  private Counter numRecordsCounter;

  @Override
  public ConsumerRecords<Object, Object> onConsume(ConsumerRecords<Object, Object> records) {
    // Read the clock once per batch; per-record precision is not needed for an aggregate counter.
    long now = System.currentTimeMillis();
    for (ConsumerRecord<Object, Object> record : records) {
      // NOTE(review): records without a broker/producer timestamp carry NO_TIMESTAMP (-1), which
      // inflates the lag by roughly the current epoch millis — confirm whether such records
      // should be excluded from the time-lag metric.
      timeLagCounter.increment(now - record.timestamp());
      numRecordsCounter.increment();
    }
    // Interceptors must pass records through unchanged.
    return records;
  }

  @Override
  public void close() {
    // no-op: counters are owned by the shared metrics registry.
  }

  @Override
  public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
    // no-op: commit activity is not instrumented.
  }

  @Override
  public void configure(Map<String, ?> configs) {
    // Counters are fetched (or created) from the global registry; repeated configure() calls on
    // multiple interceptor instances resolve to the same underlying counters.
    this.timeLagCounter = PlatformMetricsRegistry.getMeterRegistry().counter(TIME_LAG_COUNTER_NAME);
    this.numRecordsCounter =
        PlatformMetricsRegistry.getMeterRegistry().counter(NUM_RECORDS_COUNTER_NAME);
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
package org.hypertrace.core.kafkastreams.framework.interceptors.metrics;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;

import com.google.common.collect.Maps;
import com.typesafe.config.ConfigFactory;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
import org.hypertrace.core.serviceframework.metrics.PlatformMetricsRegistry;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

/** Unit tests for {@link MetricsInterceptor} counter publication. */
public class MetricsInterceptorTest {

  @BeforeEach
  void setup() {
    // Register the in-memory "testing" reporter so counter values can be read back in assertions.
    Map<String, Object> config = Map.of("reporter.names", List.of("testing"));
    PlatformMetricsRegistry.initMetricsRegistry("test", ConfigFactory.parseMap(config));
  }

  @Test
  public void testOnConsume() {
    TopicPartition partition = new TopicPartition("test-topic", 0);
    // The short ConsumerRecord constructor leaves the timestamp at the NO_TIMESTAMP sentinel
    // (-1), so the computed lag (now - (-1)) is guaranteed positive — relied on by the
    // time-lag assertion below.
    List<ConsumerRecord<Object, Object>> records =
        List.of(
            new ConsumerRecord<>("test-topic", 0, 0, "k1", "v1"),
            new ConsumerRecord<>("test-topic", 0, 1, "k2", null));

    ConsumerRecords<Object, Object> input = new ConsumerRecords<>(Map.of(partition, records));

    MetricsInterceptor interceptor = new MetricsInterceptor();
    // configure() wires up the counters; no config keys are consumed, so an empty map suffices.
    interceptor.configure(Map.of());
    ConsumerRecords<Object, Object> output = interceptor.onConsume(input);

    // The interceptor must pass all records through unchanged.
    assertEquals(2, output.count());

    // One increment per consumed record.
    assertEquals(
        2.0, PlatformMetricsRegistry.getMeterRegistry().counter("kafka_records_count").count());
    // Strictly positive because of the NO_TIMESTAMP sentinel noted above.
    assertNotEquals(
        0.0, PlatformMetricsRegistry.getMeterRegistry().counter("kafka_records_time_lag").count());
  }
}
Loading