Browse Source

Merge branch 'rc' of github.com:thingsboard/thingsboard into master-rc

pull/14589/head
Viacheslav Klimov 5 days ago
parent
commit
5f7161e860
  1. 3
      application/src/main/java/org/thingsboard/server/service/cf/ctx/state/RocksDBCalculatedFieldStateService.java
  2. 2
      application/src/main/resources/thingsboard.yml
  3. 29
      common/queue/src/main/java/org/thingsboard/server/queue/kafka/KafkaAdmin.java
  4. 1
      common/queue/src/main/java/org/thingsboard/server/queue/kafka/TbKafkaSettings.java
  5. 2
      edqs/src/main/resources/edqs.yml
  6. 2
      msa/edqs/docker/Dockerfile
  7. 2
      msa/monitoring/docker/Dockerfile
  8. 1
      msa/pom.xml
  9. 2
      msa/tb-node/docker/Dockerfile
  10. 2
      msa/transport/coap/docker/Dockerfile
  11. 2
      msa/transport/http/docker/Dockerfile
  12. 2
      msa/transport/lwm2m/docker/Dockerfile
  13. 2
      msa/transport/mqtt/docker/Dockerfile
  14. 2
      msa/transport/snmp/docker/Dockerfile
  15. 2
      msa/vc-executor-docker/docker/Dockerfile
  16. 2
      msa/vc-executor/src/main/resources/tb-vc-executor.yml
  17. 8
      packaging/java/build.gradle
  18. 2
      transport/coap/src/main/resources/tb-coap-transport.yml
  19. 2
      transport/http/src/main/resources/tb-http-transport.yml
  20. 2
      transport/lwm2m/src/main/resources/tb-lwm2m-transport.yml
  21. 2
      transport/mqtt/src/main/resources/tb-mqtt-transport.yml
  22. 2
      transport/snmp/src/main/resources/tb-snmp-transport.yml

3
application/src/main/java/org/thingsboard/server/service/cf/ctx/state/RocksDBCalculatedFieldStateService.java

@ -15,7 +15,6 @@
*/
package org.thingsboard.server.service.cf.ctx.state;
import com.google.protobuf.InvalidProtocolBufferException;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression;
@ -66,7 +65,7 @@ public class RocksDBCalculatedFieldStateService extends AbstractCalculatedFieldS
CalculatedFieldStateProto stateMsg;
try {
stateMsg = CalculatedFieldStateProto.parseFrom(value);
} catch (InvalidProtocolBufferException e) {
} catch (Exception e) {
log.error("Failed to parse CalculatedFieldStateProto for key {}", key, e);
return;
}

2
application/src/main/resources/thingsboard.yml

@ -1745,6 +1745,8 @@ queue:
print-interval-ms: "${TB_QUEUE_KAFKA_CONSUMER_STATS_MIN_PRINT_INTERVAL_MS:60000}"
# Time to wait for the stats-loading requests to Kafka to finish
kafka-response-timeout-ms: "${TB_QUEUE_KAFKA_CONSUMER_STATS_RESPONSE_TIMEOUT_MS:1000}"
# Topics cache TTL in milliseconds. 5 minutes by default
topics_cache_ttl_ms: "${TB_QUEUE_KAFKA_TOPICS_CACHE_TTL_MS:300000}"
partitions:
hash_function_name: "${TB_QUEUE_PARTITIONS_HASH_FUNCTION_NAME:murmur3_128}" # murmur3_32, murmur3_128 or sha256
transport_api:

29
common/queue/src/main/java/org/thingsboard/server/queue/kafka/KafkaAdmin.java

@ -58,15 +58,12 @@ public class KafkaAdmin {
private final TbKafkaSettings settings;
@Value("${queue.kafka.request.timeout.ms:30000}")
private int requestTimeoutMs;
@Value("${queue.kafka.topics_cache_ttl_ms:300000}") // 5 minutes by default
private int topicsCacheTtlMs;
private final LazyInitializer<AdminClient> adminClient;
private final CachedValue<Set<String>> topics;
public KafkaAdmin(@Lazy TbKafkaSettings settings) {
public KafkaAdmin(@Lazy TbKafkaSettings settings,
@Value("${queue.kafka.topics_cache_ttl_ms:300000}")
int topicsCacheTtlMs) {
this.settings = settings;
this.adminClient = LazyInitializer.<AdminClient>builder()
.setInitializer(() -> AdminClient.create(settings.toAdminProps()))
@ -91,7 +88,7 @@ public class KafkaAdmin {
NewTopic newTopic = new NewTopic(topic, partitions, settings.getReplicationFactor()).configs(properties);
try {
getClient().createTopics(List.of(newTopic)).all().get(requestTimeoutMs, TimeUnit.MILLISECONDS);
getClient().createTopics(List.of(newTopic)).all().get(settings.getRequestTimeoutMs(), TimeUnit.MILLISECONDS);
topics.add(topic);
} catch (ExecutionException ee) {
log.trace("Failed to create topic {} with properties {}", topic, properties, ee);
@ -110,7 +107,7 @@ public class KafkaAdmin {
public void deleteTopic(String topic) {
log.debug("Deleting topic {}", topic);
try {
getClient().deleteTopics(List.of(topic)).all().get(requestTimeoutMs, TimeUnit.MILLISECONDS);
getClient().deleteTopics(List.of(topic)).all().get(settings.getRequestTimeoutMs(), TimeUnit.MILLISECONDS);
} catch (Exception e) {
log.error("Failed to delete kafka topic [{}].", topic, e);
}
@ -122,7 +119,7 @@ public class KafkaAdmin {
public Set<String> listTopics() {
try {
Set<String> topics = getClient().listTopics().names().get(requestTimeoutMs, TimeUnit.MILLISECONDS);
Set<String> topics = getClient().listTopics().names().get(settings.getRequestTimeoutMs(), TimeUnit.MILLISECONDS);
log.trace("Listed topics: {}", topics);
return topics;
} catch (Exception e) {
@ -150,7 +147,7 @@ public class KafkaAdmin {
.collect(Collectors.toMap(tp -> tp, tp -> OffsetSpec.latest()));
Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> endOffsets =
getClient().listOffsets(latestOffsetsSpec).all().get(requestTimeoutMs, TimeUnit.MILLISECONDS);
getClient().listOffsets(latestOffsetsSpec).all().get(settings.getRequestTimeoutMs(), TimeUnit.MILLISECONDS);
return committedOffsets.entrySet().stream()
.mapToLong(entry -> {
@ -169,7 +166,7 @@ public class KafkaAdmin {
@SneakyThrows
public Map<TopicPartition, OffsetAndMetadata> getConsumerGroupOffsets(String groupId) {
return getClient().listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata().get(requestTimeoutMs, TimeUnit.MILLISECONDS);
return getClient().listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata().get(settings.getRequestTimeoutMs(), TimeUnit.MILLISECONDS);
}
/**
@ -212,7 +209,7 @@ public class KafkaAdmin {
} else {
log.info("[{}] SHOULD alter topic offset [{}] less than old node group offset [{}]", tp, existingOffset.offset(), om.offset());
}
getClient().alterConsumerGroupOffsets(newGroupId, Map.of(tp, om)).all().get(requestTimeoutMs, TimeUnit.MILLISECONDS);
getClient().alterConsumerGroupOffsets(newGroupId, Map.of(tp, om)).all().get(settings.getRequestTimeoutMs(), TimeUnit.MILLISECONDS);
log.info("[{}] altered new consumer groupId {}", tp, newGroupId);
break;
}
@ -229,7 +226,7 @@ public class KafkaAdmin {
return true;
}
List<TopicPartition> allPartitions = getClient().describeTopics(existingTopics).allTopicNames().get(requestTimeoutMs, TimeUnit.MILLISECONDS)
List<TopicPartition> allPartitions = getClient().describeTopics(existingTopics).allTopicNames().get(settings.getRequestTimeoutMs(), TimeUnit.MILLISECONDS)
.entrySet().stream()
.flatMap(entry -> {
String topic = entry.getKey();
@ -239,9 +236,9 @@ public class KafkaAdmin {
.toList();
Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> beginningOffsets = getClient().listOffsets(allPartitions.stream()
.collect(Collectors.toMap(partition -> partition, partition -> OffsetSpec.earliest()))).all().get(requestTimeoutMs, TimeUnit.MILLISECONDS);
.collect(Collectors.toMap(partition -> partition, partition -> OffsetSpec.earliest()))).all().get(settings.getRequestTimeoutMs(), TimeUnit.MILLISECONDS);
Map<TopicPartition, ListOffsetsResult.ListOffsetsResultInfo> endOffsets = getClient().listOffsets(allPartitions.stream()
.collect(Collectors.toMap(partition -> partition, partition -> OffsetSpec.latest()))).all().get(requestTimeoutMs, TimeUnit.MILLISECONDS);
.collect(Collectors.toMap(partition -> partition, partition -> OffsetSpec.latest()))).all().get(settings.getRequestTimeoutMs(), TimeUnit.MILLISECONDS);
for (TopicPartition partition : allPartitions) {
long beginningOffset = beginningOffsets.get(partition).offset();
@ -261,7 +258,7 @@ public class KafkaAdmin {
public void deleteConsumerGroup(String consumerGroupId) {
try {
getClient().deleteConsumerGroups(List.of(consumerGroupId)).all().get(requestTimeoutMs, TimeUnit.MILLISECONDS);
getClient().deleteConsumerGroups(List.of(consumerGroupId)).all().get(settings.getRequestTimeoutMs(), TimeUnit.MILLISECONDS);
} catch (Exception e) {
log.warn("Failed to delete consumer group {}", consumerGroupId, e);
}

1
common/queue/src/main/java/org/thingsboard/server/queue/kafka/TbKafkaSettings.java

@ -112,6 +112,7 @@ public class TbKafkaSettings {
@Value("${queue.kafka.fetch_max_bytes:134217728}")
private int fetchMaxBytes;
@Getter
@Value("${queue.kafka.request.timeout.ms:30000}")
private int requestTimeoutMs;

2
edqs/src/main/resources/edqs.yml

@ -177,6 +177,8 @@ queue:
print-interval-ms: "${TB_QUEUE_KAFKA_CONSUMER_STATS_MIN_PRINT_INTERVAL_MS:60000}"
# Time to wait for the stats-loading requests to Kafka to finish
kafka-response-timeout-ms: "${TB_QUEUE_KAFKA_CONSUMER_STATS_RESPONSE_TIMEOUT_MS:1000}"
# Topics cache TTL in milliseconds. 5 minutes by default
topics_cache_ttl_ms: "${TB_QUEUE_KAFKA_TOPICS_CACHE_TTL_MS:300000}"
partitions:
hash_function_name: "${TB_QUEUE_PARTITIONS_HASH_FUNCTION_NAME:murmur3_128}" # murmur3_32, murmur3_128 or sha256

2
msa/edqs/docker/Dockerfile

@ -14,7 +14,7 @@
# limitations under the License.
#
FROM thingsboard/openjdk17:bookworm-slim
FROM ${docker.base.image}
COPY start-tb-edqs.sh ${pkg.name}.deb /tmp/

2
msa/monitoring/docker/Dockerfile

@ -14,7 +14,7 @@
# limitations under the License.
#
FROM thingsboard/openjdk17:bookworm-slim
FROM ${docker.base.image}
COPY start-tb-monitoring.sh ${pkg.name}.deb /tmp/

1
msa/pom.xml

@ -32,6 +32,7 @@
<properties>
<main.dir>${basedir}/..</main.dir>
<docker.repo>thingsboard</docker.repo>
<docker.base.image>thingsboard/openjdk17:bookworm-slim</docker.base.image>
<dockerfile.skip>true</dockerfile.skip>
<blackBoxTests.skip>true</blackBoxTests.skip>
<dockerfile-maven.version>1.4.13</dockerfile-maven.version>

2
msa/tb-node/docker/Dockerfile

@ -14,7 +14,7 @@
# limitations under the License.
#
FROM thingsboard/openjdk17:bookworm-slim
FROM ${docker.base.image}
COPY logback.xml start-tb-node.sh ${pkg.name}.deb /tmp/

2
msa/transport/coap/docker/Dockerfile

@ -14,7 +14,7 @@
# limitations under the License.
#
FROM thingsboard/openjdk17:bookworm-slim
FROM ${docker.base.image}
COPY start-tb-coap-transport.sh ${pkg.name}.deb /tmp/

2
msa/transport/http/docker/Dockerfile

@ -14,7 +14,7 @@
# limitations under the License.
#
FROM thingsboard/openjdk17:bookworm-slim
FROM ${docker.base.image}
COPY start-tb-http-transport.sh ${pkg.name}.deb /tmp/

2
msa/transport/lwm2m/docker/Dockerfile

@ -14,7 +14,7 @@
# limitations under the License.
#
FROM thingsboard/openjdk17:bookworm-slim
FROM ${docker.base.image}
COPY start-tb-lwm2m-transport.sh ${pkg.name}.deb /tmp/

2
msa/transport/mqtt/docker/Dockerfile

@ -14,7 +14,7 @@
# limitations under the License.
#
FROM thingsboard/openjdk17:bookworm-slim
FROM ${docker.base.image}
COPY start-tb-mqtt-transport.sh ${pkg.name}.deb /tmp/

2
msa/transport/snmp/docker/Dockerfile

@ -14,7 +14,7 @@
# limitations under the License.
#
FROM thingsboard/openjdk17:bookworm-slim
FROM ${docker.base.image}
COPY start-tb-snmp-transport.sh ${pkg.name}.deb /tmp/

2
msa/vc-executor-docker/docker/Dockerfile

@ -14,7 +14,7 @@
# limitations under the License.
#
FROM thingsboard/openjdk17:bookworm-slim
FROM ${docker.base.image}
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update \

2
msa/vc-executor/src/main/resources/tb-vc-executor.yml

@ -151,6 +151,8 @@ queue:
print-interval-ms: "${TB_QUEUE_KAFKA_CONSUMER_STATS_MIN_PRINT_INTERVAL_MS:60000}"
# Time to wait for the stats-loading requests to Kafka to finish
kafka-response-timeout-ms: "${TB_QUEUE_KAFKA_CONSUMER_STATS_RESPONSE_TIMEOUT_MS:1000}"
# Topics cache TTL in milliseconds. 5 minutes by default
topics_cache_ttl_ms: "${TB_QUEUE_KAFKA_TOPICS_CACHE_TTL_MS:300000}"
partitions:
hash_function_name: "${TB_QUEUE_PARTITIONS_HASH_FUNCTION_NAME:murmur3_128}" # murmur3_32, murmur3_128 or sha256
core:

8
packaging/java/build.gradle

@ -92,7 +92,11 @@ buildRpm {
archiveVersion = projectVersion.replace('-', '')
archiveFileName = "${pkgName}.rpm"
requires("(java-17 or java-17-headless or jre-17 or jre-17-headless)") // .or() notation does work in RPM plugin
// Support Java 17 (existing), plus Java 21 and Java 25 for RPM-based distros
// Keep using RPM boolean expression syntax since .or() chaining is for DEB only
requires("(java-17 or java-17-headless or jre-17 or jre-17-headless or " +
"java-21 or java-21-headless or jre-21 or jre-21-headless or " +
"java-25 or java-25-headless or jre-25 or jre-25-headless)")
from("${buildDir}/conf") {
include "${pkgName}.conf"
@ -132,6 +136,8 @@ buildDeb {
archiveFileName = "${pkgName}.deb"
requires("openjdk-17-jre").or("java17-runtime").or("oracle-java17-installer").or("openjdk-17-jre-headless")
.or("openjdk-21-jre").or("java21-runtime").or("oracle-java21-installer").or("openjdk-21-jre-headless")
.or("openjdk-25-jre").or("java25-runtime").or("oracle-java25-installer").or("openjdk-25-jre-headless")
from("${buildDir}/conf") {
include "${pkgName}.conf"

2
transport/coap/src/main/resources/tb-coap-transport.yml

@ -332,6 +332,8 @@ queue:
notifications: "${TB_QUEUE_KAFKA_NOTIFICATIONS_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1}"
# Kafka properties for Housekeeper tasks topic
housekeeper: "${TB_QUEUE_KAFKA_HOUSEKEEPER_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:10;min.insync.replicas:1}"
# Topics cache TTL in milliseconds. 5 minutes by default
topics_cache_ttl_ms: "${TB_QUEUE_KAFKA_TOPICS_CACHE_TTL_MS:300000}"
partitions:
hash_function_name: "${TB_QUEUE_PARTITIONS_HASH_FUNCTION_NAME:murmur3_128}" # murmur3_32, murmur3_128 or sha256
transport_api:

2
transport/http/src/main/resources/tb-http-transport.yml

@ -281,6 +281,8 @@ queue:
notifications: "${TB_QUEUE_KAFKA_NOTIFICATIONS_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1}"
# Kafka properties for Housekeeper tasks topic
housekeeper: "${TB_QUEUE_KAFKA_HOUSEKEEPER_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:10;min.insync.replicas:1}"
# Topics cache TTL in milliseconds. 5 minutes by default
topics_cache_ttl_ms: "${TB_QUEUE_KAFKA_TOPICS_CACHE_TTL_MS:300000}"
partitions:
hash_function_name: "${TB_QUEUE_PARTITIONS_HASH_FUNCTION_NAME:murmur3_128}" # murmur3_32, murmur3_128 or sha256
transport_api:

2
transport/lwm2m/src/main/resources/tb-lwm2m-transport.yml

@ -382,6 +382,8 @@ queue:
notifications: "${TB_QUEUE_KAFKA_NOTIFICATIONS_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1}"
# Kafka properties for Housekeeper tasks topic
housekeeper: "${TB_QUEUE_KAFKA_HOUSEKEEPER_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:10;min.insync.replicas:1}"
# Topics cache TTL in milliseconds. 5 minutes by default
topics_cache_ttl_ms: "${TB_QUEUE_KAFKA_TOPICS_CACHE_TTL_MS:300000}"
partitions:
hash_function_name: "${TB_QUEUE_PARTITIONS_HASH_FUNCTION_NAME:murmur3_128}" # murmur3_32, murmur3_128 or sha256
transport_api:

2
transport/mqtt/src/main/resources/tb-mqtt-transport.yml

@ -315,6 +315,8 @@ queue:
notifications: "${TB_QUEUE_KAFKA_NOTIFICATIONS_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:1;min.insync.replicas:1}"
# Kafka properties for Housekeeper tasks topic
housekeeper: "${TB_QUEUE_KAFKA_HOUSEKEEPER_TOPIC_PROPERTIES:retention.ms:604800000;segment.bytes:52428800;retention.bytes:1048576000;partitions:10;min.insync.replicas:1}"
# Topics cache TTL in milliseconds. 5 minutes by default
topics_cache_ttl_ms: "${TB_QUEUE_KAFKA_TOPICS_CACHE_TTL_MS:300000}"
partitions:
hash_function_name: "${TB_QUEUE_PARTITIONS_HASH_FUNCTION_NAME:murmur3_128}" # murmur3_32, murmur3_128 or sha256
transport_api:

2
transport/snmp/src/main/resources/tb-snmp-transport.yml

@ -270,6 +270,8 @@ queue:
print-interval-ms: "${TB_QUEUE_KAFKA_CONSUMER_STATS_MIN_PRINT_INTERVAL_MS:60000}"
# Time to wait for the stats-loading requests to Kafka to finish
kafka-response-timeout-ms: "${TB_QUEUE_KAFKA_CONSUMER_STATS_RESPONSE_TIMEOUT_MS:1000}"
# Topics cache TTL in milliseconds. 5 minutes by default
topics_cache_ttl_ms: "${TB_QUEUE_KAFKA_TOPICS_CACHE_TTL_MS:300000}"
partitions:
hash_function_name: "${TB_QUEUE_PARTITIONS_HASH_FUNCTION_NAME:murmur3_128}" # murmur3_32, murmur3_128 or sha256
transport_api:

Loading…
Cancel
Save