Merge pull request #8998 from smatvienko-tb/feature/kafka-producer-append-debug-headers

TbKafkaProducerTemplate will add analytics headers for each message on debug or trace log level
This commit is contained in:
Andrew Shvayka 2023-07-31 17:26:01 +03:00 committed by GitHub
commit f618599d15
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 101 additions and 1 deletions

View File

@ -30,6 +30,9 @@ import org.thingsboard.server.queue.TbQueueCallback;
import org.thingsboard.server.queue.TbQueueMsg;
import org.thingsboard.server.queue.TbQueueProducer;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.Objects;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
@ -53,10 +56,14 @@ public class TbKafkaProducerTemplate<T extends TbQueueMsg> implements TbQueuePro
private final Set<TopicPartitionInfo> topics;
@Getter
private final String clientId;
@Builder
private TbKafkaProducerTemplate(TbKafkaSettings settings, String defaultTopic, String clientId, TbQueueAdmin admin) {
Properties props = settings.toProducerProps();
this.clientId = Objects.requireNonNull(clientId, "Kafka producer client.id is null");
if (!StringUtils.isEmpty(clientId)) {
props.put(ProducerConfig.CLIENT_ID_CONFIG, clientId);
}
@ -72,6 +79,22 @@ public class TbKafkaProducerTemplate<T extends TbQueueMsg> implements TbQueuePro
public void init() {
}
/**
 * Appends diagnostic headers to an outgoing Kafka record: the producer's
 * client id and the sending thread's name are always added; when TRACE
 * logging is enabled, up to 8 caller stack frames are added as well.
 * Best-effort: stack-trace collection failures are swallowed and traced.
 */
void addAnalyticHeaders(List<Header> headers) {
    headers.add(new RecordHeader("_producerId", getClientId().getBytes(StandardCharsets.UTF_8)));
    headers.add(new RecordHeader("_threadName", Thread.currentThread().getName().getBytes(StandardCharsets.UTF_8)));
    if (!log.isTraceEnabled()) {
        return;
    }
    try {
        StackTraceElement[] frames = Thread.currentThread().getStackTrace();
        int limit = Math.min(frames.length, 10); // cap the number of emitted frames
        // frame 0 is getStackTrace() and frame 1 is this method — skip both
        for (int frame = 2; frame < limit; frame++) {
            headers.add(new RecordHeader("_stackTrace" + frame, frames[frame].toString().getBytes(StandardCharsets.UTF_8)));
        }
    } catch (Throwable t) {
        log.trace("Failed to add stacktrace headers in Kafka producer {}", getClientId(), t);
    }
}
@Override
public void send(TopicPartitionInfo tpi, T msg, TbQueueCallback callback) {
try {
@@ -79,7 +102,10 @@
String key = msg.getKey().toString();
byte[] data = msg.getData();
ProducerRecord<String, byte[]> record;
List<Header> headers = msg.getHeaders().getData().entrySet().stream().map(e -> new RecordHeader(e.getKey(), e.getValue())).collect(Collectors.toList());
if (log.isDebugEnabled()) {
addAnalyticHeaders(headers);
}
record = new ProducerRecord<>(tpi.getFullTopicName(), null, key, data, headers);
producer.send(record, (metadata, exception) -> {
if (exception == null) {

View File

@ -0,0 +1,54 @@
/**
* Copyright © 2016-2023 The Thingsboard Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.thingsboard.server.queue.kafka;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.common.header.Header;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.thingsboard.server.queue.TbQueueMsg;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.BDDMockito.willCallRealMethod;
import static org.mockito.BDDMockito.willReturn;
import static org.mockito.Mockito.mock;
@Slf4j
/**
 * Unit test for {@link TbKafkaProducerTemplate#addAnalyticHeaders(java.util.List)}.
 * Uses a partial mock so the real header-building logic runs without
 * constructing a live Kafka producer.
 */
class TbKafkaProducerTemplateTest {

    TbKafkaProducerTemplate<TbQueueMsg> producerTemplate;

    @BeforeEach
    @SuppressWarnings("unchecked") // mock(Class) cannot carry the generic parameter
    void setUp() {
        producerTemplate = mock(TbKafkaProducerTemplate.class);
        // run the real method under test; stub the client id it reads
        willCallRealMethod().given(producerTemplate).addAnalyticHeaders(any());
        willReturn("tb-core-to-core-notifications-tb-core-3").given(producerTemplate).getClientId();
    }

    @Test
    void testAddAnalyticHeaders() {
        List<Header> headers = new ArrayList<>();

        producerTemplate.addAnalyticHeaders(headers);

        assertThat(headers).isNotEmpty();
        // the producer id and thread name headers are added unconditionally
        assertThat(headers).extracting(Header::key).contains("_producerId", "_threadName");
        headers.forEach(r -> log.info("RecordHeader key [{}] value [{}]", r.key(), new String(r.value(), StandardCharsets.UTF_8)));
    }

}

View File

@ -0,0 +1,20 @@
<?xml version="1.0" encoding="UTF-8" ?>
<!-- Test logging config: console-only appender with ISO8601 timestamps. -->
<configuration>
<appender name="console" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{ISO8601} [%thread] %-5level %logger{36} - %msg%n</pattern>
</encoder>
</appender>
<!-- TbKafkaProducerTemplate will add headers for each message when log level:
- DEBUG - producerId and thread name
- TRACE - will add stacktrace.
Kafka compression is highly recommended -->
<logger name="org.thingsboard.server.queue.kafka.TbKafkaProducerTemplate" level="TRACE"/>
<root level="INFO">
<appender-ref ref="console"/>
</root>
</configuration>