Merge branch 'master' of github.com:thingsboard/thingsboard
commit 9f47e00a63
@@ -287,7 +287,11 @@ public class CalculatedFieldEntityMessageProcessor extends AbstractContextAwareM
state.checkStateSize(ctxId, ctx.getMaxStateSize());
stateSizeChecked = true;
if (state.isSizeOk()) {
cfService.pushMsgToRuleEngine(tenantId, entityId, calculationResult, cfIdList, callback);
if (!calculationResult.isEmpty()) {
cfService.pushMsgToRuleEngine(tenantId, entityId, calculationResult, cfIdList, callback);
} else {
callback.onSuccess();
}
if (DebugModeUtil.isDebugAllAvailable(ctx.getCalculatedField())) {
systemContext.persistCalculatedFieldDebugEvent(tenantId, ctx.getCfId(), entityId, state.getArguments(), tbMsgId, tbMsgType, JacksonUtil.writeValueAsString(calculationResult.getResult()), null);
}
@@ -27,4 +27,11 @@ public final class CalculatedFieldResult {
private final AttributeScope scope;
private final JsonNode result;

public boolean isEmpty() {
return result == null || result.isMissingNode() || result.isNull() ||
(result.isObject() && result.isEmpty()) ||
(result.isArray() && result.isEmpty()) ||
(result.isTextual() && result.asText().isEmpty());
}
}
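Note: a minimal standalone sketch of how an emptiness check equivalent to the new CalculatedFieldResult.isEmpty() behaves on typical Jackson JsonNode values; it uses plain Jackson only, and the class and helper names below are illustrative, not part of this commit.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.MissingNode;
import com.fasterxml.jackson.databind.node.NullNode;

public class ResultEmptinessSketch {

    // Mirrors the checks from the diff: null, missing, JSON null, empty object/array, empty string.
    // JsonNode.isEmpty() requires Jackson 2.10+.
    static boolean isEmptyResult(JsonNode result) {
        return result == null || result.isMissingNode() || result.isNull() ||
                (result.isObject() && result.isEmpty()) ||
                (result.isArray() && result.isEmpty()) ||
                (result.isTextual() && result.asText().isEmpty());
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        System.out.println(isEmptyResult(null));                          // true
        System.out.println(isEmptyResult(MissingNode.getInstance()));     // true
        System.out.println(isEmptyResult(NullNode.getInstance()));        // true
        System.out.println(isEmptyResult(mapper.readTree("{}")));         // true
        System.out.println(isEmptyResult(mapper.readTree("[]")));         // true
        System.out.println(isEmptyResult(mapper.readTree("\"\"")));       // true
        System.out.println(isEmptyResult(mapper.readTree("{\"fahrenheitTemp\":86}"))); // false
    }
}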
@@ -47,12 +47,20 @@ public class DefaultCalculatedFieldInitService implements CalculatedFieldInitSer
PageDataIterable<ProfileEntityIdInfo> deviceIdInfos = new PageDataIterable<>(deviceService::findProfileEntityIdInfos, initFetchPackSize);
for (ProfileEntityIdInfo idInfo : deviceIdInfos) {
log.trace("Processing device record: {}", idInfo);
entityProfileCache.add(idInfo.getTenantId(), idInfo.getProfileId(), idInfo.getEntityId());
try {
entityProfileCache.add(idInfo.getTenantId(), idInfo.getProfileId(), idInfo.getEntityId());
} catch (Exception e) {
log.error("Failed to process device record: {}", idInfo, e);
}
}
PageDataIterable<ProfileEntityIdInfo> assetIdInfos = new PageDataIterable<>(assetService::findProfileEntityIdInfos, initFetchPackSize);
for (ProfileEntityIdInfo idInfo : assetIdInfos) {
log.trace("Processing asset record: {}", idInfo);
entityProfileCache.add(idInfo.getTenantId(), idInfo.getProfileId(), idInfo.getEntityId());
try {
entityProfileCache.add(idInfo.getTenantId(), idInfo.getProfileId(), idInfo.getEntityId());
} catch (Exception e) {
log.error("Failed to process asset record: {}", idInfo, e);
}
}
}
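Note: the change above wraps each cache insertion in its own try/catch so one bad record cannot abort the initial fetch loop; a generic sketch of that log-and-continue pattern (names and the parsing step are illustrative, not ThingsBoard APIs).

import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ResilientInitSketch {

    private final Map<String, Integer> cache = new ConcurrentHashMap<>();

    // Populate the cache from all records; a record that fails is logged and skipped
    // instead of stopping the whole initialization loop.
    void init(List<String> records) {
        for (String rec : records) {
            try {
                cache.put(rec, Integer.parseInt(rec));
            } catch (Exception e) {
                System.err.println("Failed to process record: " + rec + " - " + e);
            }
        }
    }

    public static void main(String[] args) {
        ResilientInitSketch sketch = new ResilientInitSketch();
        sketch.init(List.of("1", "not-a-number", "3")); // the bad record does not stop the loop
        System.out.println(sketch.cache);               // {1=1, 3=3} (order may vary)
    }
}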
@@ -83,7 +83,7 @@ public class CalculatedFieldCtx {
for (Map.Entry<String, Argument> entry : arguments.entrySet()) {
var refId = entry.getValue().getRefEntityId();
var refKey = entry.getValue().getRefEntityKey();
if (refId == null) {
if (refId == null || refId.equals(calculatedField.getEntityId())) {
mainEntityArguments.put(refKey, entry.getKey());
} else {
linkedEntityArguments.computeIfAbsent(refId, key -> new HashMap<>()).put(refKey, entry.getKey());
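Note: after this change an argument whose reference id is null or equal to the calculated field's own entity id is routed to the main-entity map, while everything else is grouped per linked entity; a simplified sketch with illustrative types (plain UUIDs instead of the ThingsBoard id classes).

import java.util.HashMap;
import java.util.Map;
import java.util.UUID;

public class ArgumentRoutingSketch {

    record Argument(UUID refEntityId, String refEntityKey) {}

    static void route(UUID ownEntityId, Map<String, Argument> arguments,
                      Map<String, String> mainEntityArguments,
                      Map<UUID, Map<String, String>> linkedEntityArguments) {
        for (Map.Entry<String, Argument> entry : arguments.entrySet()) {
            var refId = entry.getValue().refEntityId();
            var refKey = entry.getValue().refEntityKey();
            // New condition from the diff: a reference to the CF's own entity counts as "main".
            if (refId == null || refId.equals(ownEntityId)) {
                mainEntityArguments.put(refKey, entry.getKey());
            } else {
                linkedEntityArguments.computeIfAbsent(refId, key -> new HashMap<>()).put(refKey, entry.getKey());
            }
        }
    }
}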
@@ -53,8 +53,6 @@ import static org.thingsboard.server.service.state.DefaultDeviceStateService.LAS
@ConditionalOnExpression("'${queue.type:null}'=='kafka' && ${edges.enabled:true} && ${sql.ttl.edge_events.edge_events_ttl:0} > 0")
public class KafkaEdgeTopicsCleanUpService extends AbstractCleanUpService {

private static final String EDGE_EVENT_TOPIC_NAME = "tb_edge_event.notifications.";

private final TopicService topicService;
private final TenantService tenantService;
private final EdgeService edgeService;

@@ -64,6 +62,9 @@ public class KafkaEdgeTopicsCleanUpService extends AbstractCleanUpService {
@Value("${sql.ttl.edge_events.edge_events_ttl:2628000}")
private long ttlSeconds;

@Value("${queue.edge.event-notifications-topic:tb_edge_event.notifications}")
private String tbEdgeEventNotificationsTopic;

public KafkaEdgeTopicsCleanUpService(PartitionService partitionService, EdgeService edgeService,
TenantService tenantService, AttributesService attributesService,
TopicService topicService, TbKafkaSettings kafkaSettings, TbKafkaTopicConfigs kafkaTopicConfigs) {

@@ -86,7 +87,7 @@ public class KafkaEdgeTopicsCleanUpService extends AbstractCleanUpService {
return;
}

String edgeTopicPrefix = topicService.buildTopicName(EDGE_EVENT_TOPIC_NAME);
String edgeTopicPrefix = topicService.buildTopicName(tbEdgeEventNotificationsTopic);
List<String> matchingTopics = topics.stream().filter(topic -> topic.startsWith(edgeTopicPrefix)).toList();
if (matchingTopics.isEmpty()) {
log.debug("No matching topics found with prefix [{}]. Skipping cleanup.", edgeTopicPrefix);

@@ -147,7 +148,7 @@ public class KafkaEdgeTopicsCleanUpService extends AbstractCleanUpService {
try {
String remaining = topic.substring(prefix.length());
String[] parts = remaining.split("\\.");
TenantId tenantId = new TenantId(UUID.fromString(parts[0]));
TenantId tenantId = TenantId.fromUUID(UUID.fromString(parts[0]));
EdgeId edgeId = new EdgeId(UUID.fromString(parts[1]));
tenantEdgeMap.computeIfAbsent(tenantId, id -> new ArrayList<>()).add(edgeId);
} catch (Exception e) {
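Note: the cleanup service derives tenant and edge ids from the topic suffix; a standalone sketch of that parsing, assuming topics of the form <prefix><tenantUuid>.<edgeUuid> as in the diff and using plain java.util.UUID instead of the ThingsBoard id classes.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

public class EdgeTopicParsingSketch {

    public static void main(String[] args) {
        String prefix = "tb_edge_event.notifications.";
        List<String> topics = List.of(
                prefix + "1e2a8f60-0000-0000-0000-000000000001.1e2a8f60-0000-0000-0000-000000000002");

        Map<UUID, List<UUID>> tenantEdgeMap = new HashMap<>();
        for (String topic : topics) {
            try {
                String remaining = topic.substring(prefix.length());
                String[] parts = remaining.split("\\.");
                UUID tenantId = UUID.fromString(parts[0]);   // the diff switches to TenantId.fromUUID(...) here
                UUID edgeId = UUID.fromString(parts[1]);
                tenantEdgeMap.computeIfAbsent(tenantId, id -> new ArrayList<>()).add(edgeId);
            } catch (Exception e) {
                // malformed topic names are skipped, mirroring the catch block in the diff
            }
        }
        System.out.println(tenantEdgeMap);
    }
}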
@@ -48,6 +48,9 @@ import static org.awaitility.Awaitility.await;
@DaoSqlTest
public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTest {

public static final int TIMEOUT = 60;
public static final int POLL_INTERVAL = 1;

@BeforeEach
void setUp() throws Exception {
loginTenantAdmin();

@@ -86,6 +89,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
CalculatedField savedCalculatedField = doPost("/api/calculatedField", calculatedField, CalculatedField.class);

await().alias("create CF -> perform initial calculation").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
ObjectNode fahrenheitTemp = getLatestTelemetry(testDevice.getId(), "fahrenheitTemp");
assertThat(fahrenheitTemp).isNotNull();

@@ -95,6 +99,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
doPost("/api/plugins/telemetry/DEVICE/" + testDevice.getUuidId() + "/timeseries/" + DataConstants.SERVER_SCOPE, JacksonUtil.toJsonNode("{\"temperature\":30}"));

await().alias("update telemetry -> recalculate state").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
ObjectNode fahrenheitTemp = getLatestTelemetry(testDevice.getId(), "fahrenheitTemp");
assertThat(fahrenheitTemp).isNotNull();

@@ -108,6 +113,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
savedCalculatedField = doPost("/api/calculatedField", savedCalculatedField, CalculatedField.class);

await().alias("update CF output -> perform calculation with updated output").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
ArrayNode temperatureF = getServerAttributes(testDevice.getId(), "temperatureF");
assertThat(temperatureF).isNotNull();

@@ -119,6 +125,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
savedCalculatedField = doPost("/api/calculatedField", savedCalculatedField, CalculatedField.class);

await().alias("update CF argument -> perform calculation with new argument").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
ArrayNode temperatureF = getServerAttributes(testDevice.getId(), "temperatureF");
assertThat(temperatureF).isNotNull();

@@ -129,6 +136,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
savedCalculatedField = doPost("/api/calculatedField", savedCalculatedField, CalculatedField.class);

await().alias("update CF expression -> perform calculation with new expression").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
ArrayNode temperatureF = getServerAttributes(testDevice.getId(), "temperatureF");
assertThat(temperatureF).isNotNull();

@@ -166,6 +174,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
CalculatedField savedCalculatedField = doPost("/api/calculatedField", calculatedField, CalculatedField.class);

await().alias("create CF -> state is not ready -> no calculation performed").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
ObjectNode fahrenheitTemp = getLatestTelemetry(testDevice.getId(), "fahrenheitTemp");
assertThat(fahrenheitTemp).isNotNull();

@@ -175,6 +184,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
doPost("/api/plugins/telemetry/DEVICE/" + testDevice.getUuidId() + "/timeseries/" + DataConstants.SERVER_SCOPE, JacksonUtil.toJsonNode("{\"temperature\":30}"));

await().alias("update telemetry -> perform calculation").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
ObjectNode fahrenheitTemp = getLatestTelemetry(testDevice.getId(), "fahrenheitTemp");
assertThat(fahrenheitTemp).isNotNull();

@@ -213,6 +223,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
CalculatedField savedCalculatedField = doPost("/api/calculatedField", calculatedField, CalculatedField.class);

await().alias("create CF -> perform initial calculation with default value").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
ObjectNode fahrenheitTemp = getLatestTelemetry(testDevice.getId(), "fahrenheitTemp");
assertThat(fahrenheitTemp).isNotNull();

@@ -222,6 +233,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
doPost("/api/plugins/telemetry/DEVICE/" + testDevice.getUuidId() + "/timeseries/" + DataConstants.SERVER_SCOPE, JacksonUtil.toJsonNode("{\"temperature\":30}"));

await().alias("update telemetry -> recalculate state").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
ObjectNode fahrenheitTemp = getLatestTelemetry(testDevice.getId(), "fahrenheitTemp");
assertThat(fahrenheitTemp).isNotNull();

@@ -277,6 +289,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
doPost("/api/calculatedField", calculatedField, CalculatedField.class);

await().alias("create CF and perform initial calculation").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
// result of asset 1
ArrayNode z1 = getServerAttributes(asset1.getId(), "z");

@@ -292,6 +305,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
doPost("/api/plugins/telemetry/DEVICE/" + testDevice.getUuidId() + "/attributes/" + DataConstants.SERVER_SCOPE, JacksonUtil.toJsonNode("{\"x\":25}"));

await().alias("update device telemetry -> recalculate state for all assets").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
// result of asset 1
ArrayNode z1 = getServerAttributes(asset1.getId(), "z");

@@ -307,6 +321,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
doPost("/api/plugins/telemetry/ASSET/" + asset1.getUuidId() + "/attributes/" + DataConstants.SERVER_SCOPE, JacksonUtil.toJsonNode("{\"y\":15}"));

await().alias("update asset 1 telemetry -> recalculate state only for asset 1").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
// result of asset 1
ArrayNode z1 = getServerAttributes(asset1.getId(), "z");

@@ -322,6 +337,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
doPost("/api/plugins/telemetry/ASSET/" + asset2.getUuidId() + "/attributes/" + DataConstants.SERVER_SCOPE, JacksonUtil.toJsonNode("{\"y\":5}"));

await().alias("update asset 2 telemetry -> recalculate state only for asset 2").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
// result of asset 1 (no changes)
ArrayNode z1 = getServerAttributes(asset1.getId(), "z");

@@ -339,6 +355,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes

Asset finalAsset3 = asset3;
await().alias("add new entity to profile -> calculate state for new entity").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
// result of asset 3
ArrayNode z3 = getServerAttributes(finalAsset3.getId(), "z");

@@ -349,6 +366,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
doPost("/api/plugins/telemetry/DEVICE/" + testDevice.getUuidId() + "/attributes/" + DataConstants.SERVER_SCOPE, JacksonUtil.toJsonNode("{\"x\":20}"));

await().alias("update device telemetry -> recalculate state for all assets").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
// result of asset 1
ArrayNode z1 = getServerAttributes(asset1.getId(), "z");

@@ -375,6 +393,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes

Asset updatedAsset3 = asset3;
await().alias("update device telemetry -> recalculate state for asset 1 and asset 2").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
// result of asset 1
ArrayNode z1 = getServerAttributes(asset1.getId(), "z");

@@ -425,6 +444,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
CalculatedField savedCalculatedField = doPost("/api/calculatedField", calculatedField, CalculatedField.class);

await().alias("create CF -> ctx is not initialized -> no calculation perform").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
ObjectNode fahrenheitTemp = getLatestTelemetry(testDevice.getId(), "fahrenheitTemp");
assertThat(fahrenheitTemp).isNotNull();

@@ -434,6 +454,7 @@ public class CalculatedFieldIntegrationTest extends CalculatedFieldControllerTes
doPost("/api/plugins/telemetry/DEVICE/" + testDevice.getUuidId() + "/timeseries/" + DataConstants.SERVER_SCOPE, JacksonUtil.toJsonNode("{\"temperature\":30}"));

await().alias("update telemetry -> ctx is not initialized -> no calculation perform").atMost(TIMEOUT, TimeUnit.SECONDS)
.pollInterval(POLL_INTERVAL, TimeUnit.SECONDS)
.untilAsserted(() -> {
ObjectNode fahrenheitTemp = getLatestTelemetry(testDevice.getId(), "fahrenheitTemp");
assertThat(fahrenheitTemp).isNotNull();
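Note: the test changes above add an explicit poll interval to each Awaitility await; a minimal self-contained sketch of that pattern (the awaited condition is illustrative, not taken from the test).

import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.await;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class AwaitWithPollIntervalSketch {

    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger();
        // Background work that finishes after roughly one second.
        new Thread(() -> {
            while (counter.incrementAndGet() < 5) {
                try { Thread.sleep(200); } catch (InterruptedException ignored) { }
            }
        }).start();

        await().alias("counter reaches 5")
                .atMost(60, TimeUnit.SECONDS)
                .pollInterval(1, TimeUnit.SECONDS)   // same style as the added .pollInterval(POLL_INTERVAL, ...) calls
                .untilAsserted(() -> assertThat(counter.get()).isGreaterThanOrEqualTo(5));
    }
}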
@@ -28,6 +28,7 @@ import org.springframework.boot.test.mock.mockito.SpyBean;
import org.springframework.test.context.TestPropertySource;
import org.thingsboard.common.util.JacksonUtil;
import org.thingsboard.server.actors.ActorSystemContext;
import org.thingsboard.server.common.data.DataConstants;
import org.thingsboard.server.common.data.Device;
import org.thingsboard.server.common.data.DeviceProfile;
import org.thingsboard.server.common.data.exception.ThingsboardException;

@@ -73,6 +74,7 @@ import java.util.stream.Stream;

import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.await;
import static org.hamcrest.Matchers.containsString;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.ArgumentMatchers.argThat;

@@ -325,6 +327,59 @@ public class BaseQueueControllerTest extends AbstractControllerTest {
doDelete("/api/queues/" + queue.getUuidId()).andExpect(status().isOk());
}

@Test
public void testQueueWithReservedName() throws Exception {
loginSysAdmin();

// create queue
Queue queue = new Queue();
queue.setName(DataConstants.CF_QUEUE_NAME);
queue.setTopic("tb_rule_engine.calculated_fields");
queue.setPollInterval(25);
queue.setPartitions(10);
queue.setTenantId(TenantId.SYS_TENANT_ID);
queue.setConsumerPerPartition(false);
queue.setPackProcessingTimeout(2000);
SubmitStrategy submitStrategy = new SubmitStrategy();
submitStrategy.setType(SubmitStrategyType.SEQUENTIAL_BY_ORIGINATOR);
queue.setSubmitStrategy(submitStrategy);
ProcessingStrategy processingStrategy = new ProcessingStrategy();
processingStrategy.setType(ProcessingStrategyType.RETRY_ALL);
processingStrategy.setRetries(3);
processingStrategy.setFailurePercentage(0.7);
processingStrategy.setPauseBetweenRetries(3);
processingStrategy.setMaxPauseBetweenRetries(5);
queue.setProcessingStrategy(processingStrategy);

doPost("/api/queues?serviceType=" + "TB-RULE-ENGINE", queue)
.andExpect(status().isBadRequest())
.andExpect(statusReason(containsString(String.format("The queue name '%s' is not allowed. This name is reserved for internal use. Please choose a different name.", DataConstants.CF_QUEUE_NAME))));

// create queue
Queue queue2 = new Queue();
queue2.setName(DataConstants.CF_STATES_QUEUE_NAME);
queue2.setTopic("tb_rule_engine.calculated_fields");
queue2.setPollInterval(25);
queue2.setPartitions(10);
queue2.setTenantId(TenantId.SYS_TENANT_ID);
queue2.setConsumerPerPartition(false);
queue2.setPackProcessingTimeout(2000);
SubmitStrategy submitStrategy2 = new SubmitStrategy();
submitStrategy2.setType(SubmitStrategyType.SEQUENTIAL_BY_ORIGINATOR);
queue2.setSubmitStrategy(submitStrategy);
ProcessingStrategy processingStrategy2 = new ProcessingStrategy();
processingStrategy2.setType(ProcessingStrategyType.RETRY_ALL);
processingStrategy2.setRetries(3);
processingStrategy2.setFailurePercentage(0.7);
processingStrategy2.setPauseBetweenRetries(3);
processingStrategy2.setMaxPauseBetweenRetries(5);
queue2.setProcessingStrategy(processingStrategy);

doPost("/api/queues?serviceType=" + "TB-RULE-ENGINE", queue2)
.andExpect(status().isBadRequest())
.andExpect(statusReason(containsString(String.format("The queue name '%s' is not allowed. This name is reserved for internal use. Please choose a different name.", DataConstants.CF_STATES_QUEUE_NAME))));
}

private Queue saveQueue(Queue queue) {
return doPost("/api/queues?serviceType=TB_RULE_ENGINE", queue, Queue.class);
}
@@ -428,7 +428,6 @@ public class HashPartitionServiceTest {
ReflectionTestUtils.setField(partitionService, "corePartitions", 10);
ReflectionTestUtils.setField(partitionService, "cfEventTopic", "tb_cf_event");
ReflectionTestUtils.setField(partitionService, "cfStateTopic", "tb_cf_state");
ReflectionTestUtils.setField(partitionService, "cfPartitions", 10);
ReflectionTestUtils.setField(partitionService, "vcTopic", "tb.vc");
ReflectionTestUtils.setField(partitionService, "vcPartitions", 10);
ReflectionTestUtils.setField(partitionService, "hashFunctionName", hashFunctionName);
@@ -422,19 +422,19 @@ public class TenantRepo {
}

public String getOwnerName(EntityId ownerId) {
if (ownerId == null || (EntityType.CUSTOMER.equals(ownerId.getEntityType()) && CustomerId.NULL_UUID.equals(ownerId.getId()))) {
ownerId = tenantId;
if (ownerId == null || (ownerId.getEntityType() == EntityType.CUSTOMER && ownerId.isNullUid())) {
return getOwnerEntityName(tenantId);
}
return getEntityName(ownerId);
return getOwnerEntityName(ownerId);
}

private String getEntityName(EntityId entityId) {
private String getOwnerEntityName(EntityId entityId) {
EntityType entityType = entityId.getEntityType();
if (entityType == EntityType.TENANT && entityId.getId().equals(TenantId.NULL_UUID)) {
return "";
}
return switch (entityType) {
case CUSTOMER, TENANT -> getEntityMap(entityType).get(entityId.getId()).getFields().getName();
case CUSTOMER, TENANT -> {
EntityFields fields = getEntityMap(entityType).get(entityId.getId()).getFields();
yield fields != null ? fields.getName() : "";
}
default -> throw new RuntimeException("Unsupported entity type: " + entityType);
};
}
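Note: the TenantRepo change falls back to an empty name when no entity fields are cached; a simplified null-safe lookup sketch with illustrative types, not the actual EDQS classes.

import java.util.Map;
import java.util.UUID;

public class OwnerNameSketch {

    record EntityFields(String name) {}

    // Returns the entity name, or an empty string when the entity has no cached fields.
    static String getOwnerEntityName(Map<UUID, EntityFields> entityMap, UUID entityId) {
        EntityFields fields = entityMap.get(entityId);
        return fields != null ? fields.name() : "";
    }
}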
@@ -21,6 +21,7 @@ import org.apache.commons.io.FileUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Lazy;
import org.thingsboard.server.common.data.BaseData;
import org.thingsboard.server.common.data.DataConstants;
import org.thingsboard.server.common.data.EntityType;
import org.thingsboard.server.common.data.StringUtils;
import org.thingsboard.server.common.data.id.EntityId;

@@ -158,6 +159,9 @@ public abstract class DataValidator<D extends BaseData<?>> {

protected static void validateQueueName(String name) {
validateQueueNameOrTopic(name, NAME);
if (DataConstants.CF_QUEUE_NAME.equals(name) || DataConstants.CF_STATES_QUEUE_NAME.equals(name)) {
throw new DataValidationException(String.format("The queue name '%s' is not allowed. This name is reserved for internal use. Please choose a different name.", name));
}
}

protected static void validateQueueTopic(String topic) {
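Note: a minimal sketch of the reserved-queue-name validation added above; the reserved names and exception type below are placeholders for DataConstants.CF_QUEUE_NAME, DataConstants.CF_STATES_QUEUE_NAME, and DataValidationException.

import java.util.Set;

public class QueueNameValidationSketch {

    // Placeholder values; the real check compares against the ThingsBoard DataConstants fields.
    private static final Set<String> RESERVED_QUEUE_NAMES = Set.of("ReservedQueueA", "ReservedQueueB");

    static void validateQueueName(String name) {
        if (RESERVED_QUEUE_NAMES.contains(name)) {
            throw new IllegalArgumentException(String.format(
                    "The queue name '%s' is not allowed. This name is reserved for internal use. Please choose a different name.", name));
        }
    }
}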
@@ -143,7 +143,6 @@ function additionalComposeEdqsArgs() {
function permissionList() {
PERMISSION_LIST="
799 799 tb-node/log
799 799 tb-transports/coap/log
799 799 tb-transports/lwm2m/log
799 799 tb-transports/http/log
799 799 tb-transports/mqtt/log

@@ -200,29 +199,77 @@ function permissionList() {
}

function checkFolders() {
CREATE=false
SKIP_CHOWN=false
for i in "$@"
do
case $i in
--create)
CREATE=true
shift
;;
--skipChown)
SKIP_CHOWN=true
shift
;;
*)
# unknown option
;;
esac
done
EXIT_CODE=0
PERMISSION_LIST=$(permissionList) || exit $?
set -e
while read -r USR GRP DIR
do
if [ -z "$DIR" ]; then # skip empty lines
IS_EXIST_CHECK_PASSED=false
IS_OWNER_CHECK_PASSED=false

# skip empty lines
if [ -z "$DIR" ]; then
continue
fi
MESSAGE="Checking user ${USR} group ${GRP} dir ${DIR}"
if [[ -d "$DIR" ]] &&
[[ $(ls -ldn "$DIR" | awk '{print $3}') -eq "$USR" ]] &&
[[ $(ls -ldn "$DIR" | awk '{print $4}') -eq "$GRP" ]]
then
MESSAGE="$MESSAGE OK"

# checks section
echo "Checking if dir ${DIR} exists..."
if [[ -d "$DIR" ]]; then
echo "> OK"
IS_EXIST_CHECK_PASSED=true
if [ "$SKIP_CHOWN" = false ]; then
echo "Checking user ${USR} group ${GRP} ownership for dir ${DIR}..."
if [[ $(ls -ldn "$DIR" | awk '{print $3}') -eq "$USR" ]] && [[ $(ls -ldn "$DIR" | awk '{print $4}') -eq "$GRP" ]]; then
echo "> OK"
IS_OWNER_CHECK_PASSED=true
else
echo "...ownership check failed"
if [ "$CREATE" = false ]; then
EXIT_CODE=1
fi
fi
fi
else
if [ "$1" = "--create" ]; then
echo "Create and chown: user ${USR} group ${GRP} dir ${DIR}"
mkdir -p "$DIR" && sudo chown -R "$USR":"$GRP" "$DIR"
else
echo "$MESSAGE FAILED"
echo "...does not exist"
if [ "$CREATE" = false ]; then
EXIT_CODE=1
fi
fi

# create/chown section
if [ "$CREATE" = true ]; then
if [ "$IS_EXIST_CHECK_PASSED" = false ]; then
echo "...will create dir ${DIR}"
if [ "$SKIP_CHOWN" = false ]; then
echo "...will change ownership to user ${USR} group ${GRP} for dir ${DIR}"
mkdir -p "$DIR" && sudo chown -R "$USR":"$GRP" "$DIR" && echo "> OK"
else
mkdir -p "$DIR" && echo "> OK"
fi
elif [ "$IS_OWNER_CHECK_PASSED" = false ] && [ "$SKIP_CHOWN" = false ]; then
echo "...will change ownership to user ${USR} group ${GRP} for dir ${DIR}"
sudo chown -R "$USR":"$GRP" "$DIR" && echo "> OK"
fi
fi

done < <(echo "$PERMISSION_LIST")
return $EXIT_CODE
}
@@ -17,5 +17,12 @@

set -e
source compose-utils.sh
checkFolders || exit $?
echo "OK"
if checkFolders "$@" ; then
echo "------"
echo "All checks have passed"
else
CHECK_EXIT_CODE=$?
echo "------"
echo "Some checks did not pass - check the output"
exit $CHECK_EXIT_CODE
fi
@@ -20,7 +20,7 @@ version: '3.0'
services:
zookeeper:
restart: always
image: "zookeeper:3.8.0"
image: "zookeeper:3.8.1"
ports:
- "2181"
environment:
@@ -17,4 +17,4 @@

set -e
source compose-utils.sh
checkFolders --create
checkFolders --create "$@"
@@ -53,8 +53,6 @@ ADDITIONAL_COMPOSE_EDQS_ARGS=$(additionalComposeEdqsArgs) || exit $?

ADDITIONAL_STARTUP_SERVICES=$(additionalStartupServices) || exit $?

checkFolders --create || exit $?

if [ ! -z "${ADDITIONAL_STARTUP_SERVICES// }" ]; then

COMPOSE_ARGS="\

@@ -31,8 +31,6 @@ ADDITIONAL_COMPOSE_MONITORING_ARGS=$(additionalComposeMonitoringArgs) || exit $?

ADDITIONAL_COMPOSE_EDQS_ARGS=$(additionalComposeEdqsArgs) || exit $?

checkFolders --create || exit $?

COMPOSE_ARGS="\
-f docker-compose.yml ${ADDITIONAL_CACHE_ARGS} ${ADDITIONAL_COMPOSE_ARGS} ${ADDITIONAL_COMPOSE_QUEUE_ARGS} ${ADDITIONAL_COMPOSE_MONITORING_ARGS} ${ADDITIONAL_COMPOSE_EDQS_ARGS} \
up -d"

@@ -46,8 +46,6 @@ ADDITIONAL_COMPOSE_EDQS_ARGS=$(additionalComposeEdqsArgs) || exit $?

ADDITIONAL_STARTUP_SERVICES=$(additionalStartupServices) || exit $?

checkFolders --create || exit $?

COMPOSE_ARGS_PULL="\
-f docker-compose.yml ${ADDITIONAL_CACHE_ARGS} ${ADDITIONAL_COMPOSE_ARGS} ${ADDITIONAL_COMPOSE_QUEUE_ARGS}
${ADDITIONAL_COMPOSE_EDQS_ARGS} \

@@ -29,7 +29,6 @@ ENV CASSANDRA_DATA=/data/cassandra
ENV SPRING_DRIVER_CLASS_NAME=org.postgresql.Driver
ENV SPRING_DATASOURCE_URL=jdbc:postgresql://localhost:5432/thingsboard
ENV SPRING_DATASOURCE_USERNAME=${pkg.user}
ENV SPRING_DATASOURCE_PASSWORD=postgres

ENV CASSANDRA_HOST=localhost
ENV CASSANDRA_PORT=9042

@@ -29,7 +29,6 @@ ENV PATH=$PATH:/usr/lib/postgresql/$PG_MAJOR/bin
ENV SPRING_DRIVER_CLASS_NAME=org.postgresql.Driver
ENV SPRING_DATASOURCE_URL=jdbc:postgresql://localhost:5432/thingsboard
ENV SPRING_DATASOURCE_USERNAME=${pkg.user}
ENV SPRING_DATASOURCE_PASSWORD=postgres

ENV PGLOG=/var/log/postgres