Merge pull request #8682 from AndriiLandiak/feature/redis-sentinel

[3.5.2] Redis Sentinel support
Andrew Shvayka 2023-06-07 15:24:18 +03:00 committed by GitHub
commit 7bb8f8732c
19 changed files with 432 additions and 54 deletions

View File

@@ -502,7 +502,7 @@ cache:
 spring.data.redis.repositories.enabled: false
 redis:
-# standalone or cluster
+# standalone or cluster or sentinel
 connection:
 type: "${REDIS_CONNECTION_TYPE:standalone}"
 standalone:
@@ -522,6 +522,16 @@
 nodes: "${REDIS_NODES:}"
 # Maximum number of redirects to follow when executing commands across the cluster.
 max-redirects: "${REDIS_MAX_REDIRECTS:12}"
+# if set false will be used pool config build from values of the pool config section
+useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:true}"
+sentinel:
+# name of master node
+master: "${REDIS_MASTER:}"
+# comma-separated list of "host:port" pairs of sentinels
+sentinels: "${REDIS_SENTINELS:}"
+# password to authenticate with sentinel
+password: "${REDIS_SENTINEL_PASSWORD:}"
+# if set false will be used pool config build from values of the pool config section
 useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:true}"
 # db index
 db: "${REDIS_DB:0}"

View File

@@ -26,14 +26,19 @@ import org.springframework.core.convert.converter.ConverterRegistry;
 import org.springframework.data.redis.cache.RedisCacheConfiguration;
 import org.springframework.data.redis.cache.RedisCacheManager;
 import org.springframework.data.redis.connection.RedisConnectionFactory;
+import org.springframework.data.redis.connection.RedisNode;
 import org.springframework.data.redis.connection.jedis.JedisConnectionFactory;
 import org.springframework.data.redis.core.RedisTemplate;
 import org.springframework.format.support.DefaultFormattingConversionService;
 import org.springframework.util.Assert;
+import org.thingsboard.server.common.data.StringUtils;
 import org.thingsboard.server.common.data.id.EntityId;
 import redis.clients.jedis.JedisPoolConfig;
 import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
 @Configuration
 @ConditionalOnProperty(prefix = "cache", value = "type", havingValue = "redis")
@@ -41,6 +46,9 @@ import java.time.Duration;
 @Data
 public abstract class TBRedisCacheConfiguration {
+private static final String COMMA = ",";
+private static final String COLON = ":";
 @Value("${redis.evictTtlInMs:60000}")
 private int evictTtlInMs;
@@ -126,4 +134,19 @@ public abstract class TBRedisCacheConfiguration {
 poolConfig.setBlockWhenExhausted(blockWhenExhausted);
 return poolConfig;
 }
+protected List<RedisNode> getNodes(String nodes) {
+    List<RedisNode> result;
+    if (StringUtils.isBlank(nodes)) {
+        result = Collections.emptyList();
+    } else {
+        result = new ArrayList<>();
+        for (String hostPort : nodes.split(COMMA)) {
+            String host = hostPort.split(COLON)[0];
+            int port = Integer.parseInt(hostPort.split(COLON)[1]);
+            result.add(new RedisNode(host, port));
+        }
+    }
+    return result;
+}
 }
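A quick illustration (not in the commit) of what the shared getNodes() helper above produces; the host names are made up, and the behaviour follows directly from the method body — blank input yields an empty list, otherwise each comma-separated "host:port" pair becomes a RedisNode:

    // Inside any subclass of TBRedisCacheConfiguration:
    List<RedisNode> sentinels = getNodes("redis-sentinel-1:26379,redis-sentinel-2:26379");
    // -> two RedisNode entries: (redis-sentinel-1, 26379) and (redis-sentinel-2, 26379)
    List<RedisNode> none = getNodes("");
    // -> Collections.emptyList(), because of the StringUtils.isBlank() guard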

View File

@@ -20,22 +20,13 @@ import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.context.annotation.Configuration;
 import org.springframework.data.redis.connection.RedisClusterConfiguration;
-import org.springframework.data.redis.connection.RedisNode;
 import org.springframework.data.redis.connection.jedis.JedisConnectionFactory;
-import org.thingsboard.server.common.data.StringUtils;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
 @Configuration
 @ConditionalOnMissingBean(TbCaffeineCacheConfiguration.class)
 @ConditionalOnProperty(prefix = "redis.connection", value = "type", havingValue = "cluster")
 public class TBRedisClusterConfiguration extends TBRedisCacheConfiguration {
-private static final String COMMA = ",";
-private static final String COLON = ":";
 @Value("${redis.cluster.nodes:}")
 private String clusterNodes;
@@ -59,19 +50,4 @@ public class TBRedisClusterConfiguration extends TBRedisCacheConfiguration {
 return new JedisConnectionFactory(clusterConfiguration, buildPoolConfig());
 }
 }
-private List<RedisNode> getNodes(String nodes) {
-    List<RedisNode> result;
-    if (StringUtils.isBlank(nodes)) {
-        result = Collections.emptyList();
-    } else {
-        result = new ArrayList<>();
-        for (String hostPort : nodes.split(COMMA)) {
-            String host = hostPort.split(COLON)[0];
-            Integer port = Integer.valueOf(hostPort.split(COLON)[1]);
-            result.add(new RedisNode(host, port));
-        }
-    }
-    return result;
-}
 }

View File

@@ -0,0 +1,62 @@
+/**
+ * Copyright © 2016-2023 The Thingsboard Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.thingsboard.server.cache;
+
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.data.redis.connection.RedisSentinelConfiguration;
+import org.springframework.data.redis.connection.jedis.JedisConnectionFactory;
+
+@Configuration
+@ConditionalOnMissingBean(TbCaffeineCacheConfiguration.class)
+@ConditionalOnProperty(prefix = "redis.connection", value = "type", havingValue = "sentinel")
+public class TBRedisSentinelConfiguration extends TBRedisCacheConfiguration {
+
+    @Value("${redis.sentinel.master:}")
+    private String master;
+
+    @Value("${redis.sentinel.sentinels:}")
+    private String sentinels;
+
+    @Value("${redis.sentinel.password:}")
+    private String sentinelPassword;
+
+    @Value("${redis.sentinel.useDefaultPoolConfig:true}")
+    private boolean useDefaultPoolConfig;
+
+    @Value("${redis.db:}")
+    private Integer database;
+
+    @Value("${redis.password:}")
+    private String password;
+
+    public JedisConnectionFactory loadFactory() {
+        RedisSentinelConfiguration redisSentinelConfiguration = new RedisSentinelConfiguration();
+        redisSentinelConfiguration.setMaster(master);
+        redisSentinelConfiguration.setSentinels(getNodes(sentinels));
+        redisSentinelConfiguration.setSentinelPassword(sentinelPassword);
+        redisSentinelConfiguration.setPassword(password);
+        redisSentinelConfiguration.setDatabase(database);
+        if (useDefaultPoolConfig) {
+            return new JedisConnectionFactory(redisSentinelConfiguration);
+        } else {
+            return new JedisConnectionFactory(redisSentinelConfiguration, buildPoolConfig());
+        }
+    }
+}
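To see the new factory with concrete values instead of @Value placeholders, here is a hedged, self-contained sketch that uses only the Spring Data Redis and Jedis APIs the class above already relies on; the master name, sentinel address and passwords are the illustrative values from the cache-redis-sentinel.env file further down, not required ones:

    import org.springframework.data.redis.connection.RedisSentinelConfiguration;
    import org.springframework.data.redis.connection.jedis.JedisConnectionFactory;

    public class SentinelFactoryExample {
        public static JedisConnectionFactory buildExampleFactory() {
            RedisSentinelConfiguration sentinelConfig = new RedisSentinelConfiguration()
                    .master("mymaster")                      // REDIS_MASTER
                    .sentinel("redis-sentinel", 26379);      // one REDIS_SENTINELS entry
            sentinelConfig.setSentinelPassword("sentinel");  // REDIS_SENTINEL_PASSWORD
            sentinelConfig.setPassword("thingsboard");       // REDIS_PASSWORD
            sentinelConfig.setDatabase(0);                   // REDIS_DB
            // Same branch as loadFactory() takes when useDefaultPoolConfig=true
            JedisConnectionFactory factory = new JedisConnectionFactory(sentinelConfig);
            factory.afterPropertiesSet(); // needed when wiring the factory up outside a Spring context
            return factory;
        }
    }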

View File

@@ -1,6 +1,6 @@
 TB_QUEUE_TYPE=kafka
-# redis or redis-cluster
+# redis or redis-cluster or redis-sentinel
 CACHE=redis
 DOCKER_REPO=thingsboard

docker/.gitignore
View File

@@ -12,6 +12,9 @@ tb-node/redis-cluster-data-2/**
 tb-node/redis-cluster-data-3/**
 tb-node/redis-cluster-data-4/**
 tb-node/redis-cluster-data-5/**
+tb-node/redis-sentinel-data-master/**
+tb-node/redis-sentinel-data-slave/**
+tb-node/redis-sentinel-data-sentinel/**
 tb-node/redis-data/**
 !.env

View File

@@ -21,8 +21,9 @@ In order to set cache type change the value of `CACHE` variable in `.env` file t
 - `redis` - use Redis standalone cache (1 node - 1 master);
 - `redis-cluster` - use Redis cluster cache (6 nodes - 3 masters, 3 slaves);
+- `redis-sentinel` - use Redis sentinel cache (3 nodes - 1 master, 1 slave, 1 sentinel)
-**NOTE**: According to the cache type corresponding docker service will be deployed (see `docker-compose.redis.yml`, `docker-compose.redis-cluster.yml` for details).
+**NOTE**: According to the cache type corresponding docker service will be deployed (see `docker-compose.redis.yml`, `docker-compose.redis-cluster.yml`, `docker-compose.redis-sentinel.yml` for details).
 Execute the following command to create log folders for the services and chown of these folders to the docker container users.
 To be able to change user, **chown** command is used, which requires sudo permissions (script will request password for a sudo access):

View File

@@ -0,0 +1,7 @@
+CACHE_TYPE=redis
+REDIS_CONNECTION_TYPE=sentinel
+REDIS_MASTER=mymaster
+REDIS_SENTINELS=redis-sentinel:26379
+REDIS_SENTINEL_PASSWORD=sentinel
+REDIS_USE_DEFAULT_POOL_CONFIG=false
+REDIS_PASSWORD=thingsboard

View File

@@ -84,8 +84,11 @@ function additionalComposeCacheArgs() {
 redis-cluster)
 CACHE_COMPOSE_ARGS="-f docker-compose.redis-cluster.yml"
 ;;
+redis-sentinel)
+CACHE_COMPOSE_ARGS="-f docker-compose.redis-sentinel.yml"
+;;
 *)
-echo "Unknown CACHE value specified in the .env file: '${CACHE}'. Should be either 'redis' or 'redis-cluster'." >&2
+echo "Unknown CACHE value specified in the .env file: '${CACHE}'. Should be either 'redis' or 'redis-cluster' or 'redis-sentinel'." >&2
 exit 1
 esac
 echo $CACHE_COMPOSE_ARGS
@@ -114,8 +117,11 @@ function additionalStartupServices() {
 redis-cluster)
 ADDITIONAL_STARTUP_SERVICES="$ADDITIONAL_STARTUP_SERVICES redis-node-0 redis-node-1 redis-node-2 redis-node-3 redis-node-4 redis-node-5"
 ;;
+redis-sentinel)
+ADDITIONAL_STARTUP_SERVICES="$ADDITIONAL_STARTUP_SERVICES redis-master redis-slave redis-sentinel"
+;;
 *)
-echo "Unknown CACHE value specified in the .env file: '${CACHE}'. Should be either 'redis' or 'redis-cluster'." >&2
+echo "Unknown CACHE value specified in the .env file: '${CACHE}'. Should be either 'redis' or 'redis-cluster' or 'redis-sentinel'." >&2
 exit 1
 esac
@@ -160,8 +166,15 @@ function permissionList() {
 1001 1001 tb-node/redis-cluster-data-5
 "
 ;;
+redis-sentinel)
+PERMISSION_LIST="$PERMISSION_LIST
+1001 1001 tb-node/redis-sentinel-data-master
+1001 1001 tb-node/redis-sentinel-data-slave
+1001 1001 tb-node/redis-sentinel-data-sentinel
+"
+;;
 *)
-echo "Unknown CACHE value specified in the .env file: '${CACHE}'. Should be either 'redis' or 'redis-cluster'." >&2
+echo "Unknown CACHE value specified in the .env file: '${CACHE}'. Should be either 'redis' or 'redis-cluster' or 'redis-sentinel'." >&2
 exit 1
 esac

View File

@@ -0,0 +1,40 @@
+#
+# Copyright © 2016-2023 The Thingsboard Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+version: '3.0'
+services:
+  # Redis sentinel
+  redis-master:
+    volumes:
+      - redis-sentinel-data-master:/bitnami/redis/data
+  redis-slave:
+    volumes:
+      - redis-sentinel-data-slave:/bitnami/redis/data
+  redis-sentinel:
+    volumes:
+      - redis-sentinel-data-sentinel:/bitnami/redis/data
+
+volumes:
+  redis-sentinel-data-master:
+    external:
+      name: ${REDIS_SENTINEL_DATA_VOLUME_MASTER}
+  redis-sentinel-data-slave:
+    external:
+      name: ${REDIS_SENTINEL_DATA_VOLUME_SLAVE}
+  redis-sentinel-data-sentinel:
+    external:
+      name: ${REDIS_SENTINEL_DATA_VOLUME_SENTINEL}

View File

@@ -0,0 +1,119 @@
+#
+# Copyright © 2016-2023 The Thingsboard Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+version: '3.0'
+services:
+  # Redis sentinel
+  redis-master:
+    image: 'bitnami/redis:7.0'
+    volumes:
+      - ./tb-node/redis-sentinel-data-master:/bitnami/redis/data
+    environment:
+      - 'REDIS_REPLICATION_MODE=master'
+      - 'REDIS_PASSWORD=thingsboard'
+  redis-slave:
+    image: 'bitnami/redis:7.0'
+    volumes:
+      - ./tb-node/redis-sentinel-data-slave:/bitnami/redis/data
+    environment:
+      - 'REDIS_REPLICATION_MODE=slave'
+      - 'REDIS_MASTER_HOST=redis-master'
+      - 'REDIS_MASTER_PASSWORD=thingsboard'
+      - 'REDIS_PASSWORD=thingsboard'
+    depends_on:
+      - redis-master
+  redis-sentinel:
+    image: 'bitnami/redis-sentinel:7.0'
+    volumes:
+      - ./tb-node/redis-sentinel-data-sentinel:/bitnami/redis/data
+    environment:
+      - 'REDIS_MASTER_HOST=redis-master'
+      - 'REDIS_MASTER_SET=mymaster'
+      - 'REDIS_SENTINEL_PASSWORD=sentinel'
+      - 'REDIS_MASTER_PASSWORD=thingsboard'
+    depends_on:
+      - redis-master
+      - redis-slave
+  # ThingsBoard setup to use redis-sentinel
+  tb-core1:
+    env_file:
+      - cache-redis-sentinel.env
+    depends_on:
+      - redis-sentinel
+  tb-core2:
+    env_file:
+      - cache-redis-sentinel.env
+    depends_on:
+      - redis-sentinel
+  tb-rule-engine1:
+    env_file:
+      - cache-redis-sentinel.env
+    depends_on:
+      - redis-sentinel
+  tb-rule-engine2:
+    env_file:
+      - cache-redis-sentinel.env
+    depends_on:
+      - redis-sentinel
+  tb-mqtt-transport1:
+    env_file:
+      - cache-redis-sentinel.env
+    depends_on:
+      - redis-sentinel
+  tb-mqtt-transport2:
+    env_file:
+      - cache-redis-sentinel.env
+    depends_on:
+      - redis-sentinel
+  tb-http-transport1:
+    env_file:
+      - cache-redis-sentinel.env
+    depends_on:
+      - redis-sentinel
+  tb-http-transport2:
+    env_file:
+      - cache-redis-sentinel.env
+    depends_on:
+      - redis-sentinel
+  tb-coap-transport:
+    env_file:
+      - cache-redis-sentinel.env
+    depends_on:
+      - redis-sentinel
+  tb-lwm2m-transport:
+    env_file:
+      - cache-redis-sentinel.env
+    depends_on:
+      - redis-sentinel
+  tb-snmp-transport:
+    env_file:
+      - cache-redis-sentinel.env
+    depends_on:
+      - redis-sentinel
+  tb-vc-executor1:
+    env_file:
+      - cache-redis-sentinel.env
+    depends_on:
+      - redis-sentinel
+  tb-vc-executor2:
+    env_file:
+      - cache-redis-sentinel.env
+    depends_on:
+      - redis-sentinel

View File

@@ -26,6 +26,10 @@ As result, in REPOSITORY column, next images should be present:
 mvn clean install -DblackBoxTests.skip=false -DblackBoxTests.redisCluster=true
+- Run the black box tests in the [msa/black-box-tests](../black-box-tests) directory with Redis sentinel:
+mvn clean install -DblackBoxTests.skip=false -DblackBoxTests.redisSentinel=true
 - Run the black box tests in the [msa/black-box-tests](../black-box-tests) directory in Hybrid mode (postgres + cassandra):
 mvn clean install -DblackBoxTests.skip=false -DblackBoxTests.hybridMode=true

View File

@@ -43,6 +43,7 @@ import static org.testng.Assert.fail;
 @Slf4j
 public class ContainerTestSuite {
 final static boolean IS_REDIS_CLUSTER = Boolean.parseBoolean(System.getProperty("blackBoxTests.redisCluster"));
+final static boolean IS_REDIS_SENTINEL = Boolean.parseBoolean(System.getProperty("blackBoxTests.redisSentinel"));
 final static boolean IS_HYBRID_MODE = Boolean.parseBoolean(System.getProperty("blackBoxTests.hybridMode"));
 final static String QUEUE_TYPE = System.getProperty("blackBoxTests.queue", "kafka");
 private static final String SOURCE_DIR = "./../../docker/";
@@ -80,8 +81,9 @@
 installTb = new ThingsBoardDbInstaller();
 installTb.createVolumes();
 log.info("System property of blackBoxTests.redisCluster is {}", IS_REDIS_CLUSTER);
+log.info("System property of blackBoxTests.redisSentinel is {}", IS_REDIS_SENTINEL);
 log.info("System property of blackBoxTests.hybridMode is {}", IS_HYBRID_MODE);
-boolean skipTailChildContainers = Boolean.valueOf(System.getProperty("blackBoxTests.skipTailChildContainers"));
+boolean skipTailChildContainers = Boolean.parseBoolean(System.getProperty("blackBoxTests.skipTailChildContainers"));
 try {
 final String targetDir = FileUtils.getTempDirectoryPath() + "/" + "ContainerTestSuite-" + UUID.randomUUID() + "/";
 log.info("targetDir {}", targetDir);
@@ -109,8 +111,8 @@
 new File(targetDir + (IS_HYBRID_MODE ? "docker-compose.hybrid-test-extras.yml" : "docker-compose.postgres-test-extras.yml")),
 new File(targetDir + "docker-compose.postgres.volumes.yml"),
 new File(targetDir + "docker-compose." + QUEUE_TYPE + ".yml"),
-new File(targetDir + (IS_REDIS_CLUSTER ? "docker-compose.redis-cluster.yml" : "docker-compose.redis.yml")),
-new File(targetDir + (IS_REDIS_CLUSTER ? "docker-compose.redis-cluster.volumes.yml" : "docker-compose.redis.volumes.yml")),
+new File(targetDir + resolveRedisComposeFile()),
+new File(targetDir + resolveRedisComposeVolumesFile()),
 new File(targetDir + ("docker-selenium.yml"))
 ));
@@ -154,6 +156,7 @@
 testContainer = new DockerComposeContainerImpl<>(composeFiles)
 .withPull(false)
 .withLocalCompose(true)
+.withOptions("--compatibility")
 .withTailChildContainers(!skipTailChildContainers)
 .withEnv(installTb.getEnv())
 .withEnv(queueEnv)
@@ -175,6 +178,27 @@
 fail("Failed to create test container");
 }
 }
+private static String resolveRedisComposeFile() {
+    if (IS_REDIS_CLUSTER) {
+        return "docker-compose.redis-cluster.yml";
+    }
+    if (IS_REDIS_SENTINEL) {
+        return "docker-compose.redis-sentinel.yml";
+    }
+    return "docker-compose.redis.yml";
+}
+private static String resolveRedisComposeVolumesFile() {
+    if (IS_REDIS_CLUSTER) {
+        return "docker-compose.redis-cluster.volumes.yml";
+    }
+    if (IS_REDIS_SENTINEL) {
+        return "docker-compose.redis-sentinel.volumes.yml";
+    }
+    return "docker-compose.redis.volumes.yml";
+}
 public void stop() {
 if (isActive) {
 testContainer.stop();
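A side note on the two helpers added above (illustration only): the blackBoxTests.* flags are read once into static finals, so compose-file selection depends solely on the -D properties passed to the JVM. The decision boils down to the following, where the file-name pattern mirrors resolveRedisComposeFile() and resolveRedisComposeVolumesFile():

    public class ComposeFileSelection {
        public static void main(String[] args) {
            boolean cluster = Boolean.parseBoolean(System.getProperty("blackBoxTests.redisCluster"));
            boolean sentinel = Boolean.parseBoolean(System.getProperty("blackBoxTests.redisSentinel"));
            String flavour = cluster ? "redis-cluster" : sentinel ? "redis-sentinel" : "redis";
            System.out.println("docker-compose." + flavour + ".yml");
            System.out.println("docker-compose." + flavour + ".volumes.yml");
        }
    }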

View File

@@ -32,12 +32,14 @@ import java.util.stream.IntStream;
 public class ThingsBoardDbInstaller {
 final static boolean IS_REDIS_CLUSTER = Boolean.parseBoolean(System.getProperty("blackBoxTests.redisCluster"));
+final static boolean IS_REDIS_SENTINEL = Boolean.parseBoolean(System.getProperty("blackBoxTests.redisSentinel"));
 final static boolean IS_HYBRID_MODE = Boolean.parseBoolean(System.getProperty("blackBoxTests.hybridMode"));
 private final static String POSTGRES_DATA_VOLUME = "tb-postgres-test-data-volume";
 private final static String CASSANDRA_DATA_VOLUME = "tb-cassandra-test-data-volume";
 private final static String REDIS_DATA_VOLUME = "tb-redis-data-volume";
 private final static String REDIS_CLUSTER_DATA_VOLUME = "tb-redis-cluster-data-volume";
+private final static String REDIS_SENTINEL_DATA_VOLUME = "tb-redis-sentinel-data-volume";
 private final static String TB_LOG_VOLUME = "tb-log-test-volume";
 private final static String TB_COAP_TRANSPORT_LOG_VOLUME = "tb-coap-transport-log-test-volume";
 private final static String TB_LWM2M_TRANSPORT_LOG_VOLUME = "tb-lwm2m-transport-log-test-volume";
@@ -54,6 +56,7 @@
 private final String redisDataVolume;
 private final String redisClusterDataVolume;
+private final String redisSentinelDataVolume;
 private final String tbLogVolume;
 private final String tbCoapTransportLogVolume;
 private final String tbLwm2mTransportLogVolume;
@@ -65,6 +68,7 @@
 public ThingsBoardDbInstaller() {
 log.info("System property of blackBoxTests.redisCluster is {}", IS_REDIS_CLUSTER);
+log.info("System property of blackBoxTests.redisSentinel is {}", IS_REDIS_SENTINEL);
 log.info("System property of blackBoxTests.hybridMode is {}", IS_HYBRID_MODE);
 List<File> composeFiles = new ArrayList<>(Arrays.asList(
 new File("./../../docker/docker-compose.yml"),
@@ -73,12 +77,8 @@
 ? new File("./../../docker/docker-compose.hybrid.yml")
 : new File("./../../docker/docker-compose.postgres.yml"),
 new File("./../../docker/docker-compose.postgres.volumes.yml"),
-IS_REDIS_CLUSTER
-? new File("./../../docker/docker-compose.redis-cluster.yml")
-: new File("./../../docker/docker-compose.redis.yml"),
-IS_REDIS_CLUSTER
-? new File("./../../docker/docker-compose.redis-cluster.volumes.yml")
-: new File("./../../docker/docker-compose.redis.volumes.yml")
+resolveRedisComposeFile(),
+resolveRedisComposeVolumesFile()
 ));
 if (IS_HYBRID_MODE) {
 composeFiles.add(new File("./../../docker/docker-compose.cassandra.volumes.yml"));
@@ -94,6 +94,7 @@
 cassandraDataVolume = project + "_" + CASSANDRA_DATA_VOLUME;
 redisDataVolume = project + "_" + REDIS_DATA_VOLUME;
 redisClusterDataVolume = project + "_" + REDIS_CLUSTER_DATA_VOLUME;
+redisSentinelDataVolume = project + "_" + REDIS_SENTINEL_DATA_VOLUME;
 tbLogVolume = project + "_" + TB_LOG_VOLUME;
 tbCoapTransportLogVolume = project + "_" + TB_COAP_TRANSPORT_LOG_VOLUME;
 tbLwm2mTransportLogVolume = project + "_" + TB_LWM2M_TRANSPORT_LOG_VOLUME;
@@ -121,12 +122,36 @@
 for (int i = 0; i < 6; i++) {
 env.put("REDIS_CLUSTER_DATA_VOLUME_" + i, redisClusterDataVolume + '-' + i);
 }
+} else if (IS_REDIS_SENTINEL) {
+env.put("REDIS_SENTINEL_DATA_VOLUME_MASTER", redisSentinelDataVolume + "-master");
+env.put("REDIS_SENTINEL_DATA_VOLUME_SLAVE", redisSentinelDataVolume + "-slave");
+env.put("REDIS_SENTINEL_DATA_VOLUME_SENTINEL", redisSentinelDataVolume + "-sentinel");
 } else {
 env.put("REDIS_DATA_VOLUME", redisDataVolume);
 }
 dockerCompose.withEnv(env);
 }
+private static File resolveRedisComposeVolumesFile() {
+    if (IS_REDIS_CLUSTER) {
+        return new File("./../../docker/docker-compose.redis-cluster.volumes.yml");
+    }
+    if (IS_REDIS_SENTINEL) {
+        return new File("./../../docker/docker-compose.redis-sentinel.volumes.yml");
+    }
+    return new File("./../../docker/docker-compose.redis.volumes.yml");
+}
+private static File resolveRedisComposeFile() {
+    if (IS_REDIS_CLUSTER) {
+        return new File("./../../docker/docker-compose.redis-cluster.yml");
+    }
+    if (IS_REDIS_SENTINEL) {
+        return new File("./../../docker/docker-compose.redis-sentinel.yml");
+    }
+    return new File("./../../docker/docker-compose.redis.yml");
+}
 public Map<String, String> getEnv() {
 return env;
 }
@@ -163,18 +188,30 @@
 dockerCompose.withCommand("volume create " + tbVcExecutorLogVolume);
 dockerCompose.invokeDocker();
-String additionalServices = "";
+StringBuilder additionalServices = new StringBuilder();
 if (IS_HYBRID_MODE) {
-additionalServices += " cassandra";
+additionalServices.append(" cassandra");
 }
 if (IS_REDIS_CLUSTER) {
 for (int i = 0; i < 6; i++) {
-additionalServices = additionalServices + " redis-node-" + i;
+additionalServices.append(" redis-node-").append(i);
 dockerCompose.withCommand("volume create " + redisClusterDataVolume + '-' + i);
 dockerCompose.invokeDocker();
 }
+} else if (IS_REDIS_SENTINEL) {
+additionalServices.append(" redis-master");
+dockerCompose.withCommand("volume create " + redisSentinelDataVolume + "-master");
+dockerCompose.invokeDocker();
+additionalServices.append(" redis-slave");
+dockerCompose.withCommand("volume create " + redisSentinelDataVolume + "-slave");
+dockerCompose.invokeDocker();
+additionalServices.append(" redis-sentinel");
+dockerCompose.withCommand("volume create " + redisSentinelDataVolume + "-sentinel");
+dockerCompose.invokeDocker();
 } else {
-additionalServices += " redis";
+additionalServices.append(" redis");
 dockerCompose.withCommand("volume create " + redisDataVolume);
 dockerCompose.invokeDocker();
 }
@@ -189,7 +226,7 @@
 try {
 dockerCompose.withCommand("down -v");
 dockerCompose.invokeCompose();
-} catch (Exception e) {}
+} catch (Exception ignored) {}
 }
 }
@@ -204,13 +241,22 @@
 dockerCompose.withCommand("volume rm -f " + postgresDataVolume + " " + tbLogVolume +
 " " + tbCoapTransportLogVolume + " " + tbLwm2mTransportLogVolume + " " + tbHttpTransportLogVolume +
-" " + tbMqttTransportLogVolume + " " + tbSnmpTransportLogVolume + " " + tbVcExecutorLogVolume +
-(IS_REDIS_CLUSTER
-? IntStream.range(0, 6).mapToObj(i -> " " + redisClusterDataVolume + '-' + i).collect(Collectors.joining())
-: redisDataVolume));
+" " + tbMqttTransportLogVolume + " " + tbSnmpTransportLogVolume + " " + tbVcExecutorLogVolume + resolveRedisComposeVolumeLog());
 dockerCompose.invokeDocker();
 }
+private String resolveRedisComposeVolumeLog() {
+    if (IS_REDIS_CLUSTER) {
+        return IntStream.range(0, 6).mapToObj(i -> " " + redisClusterDataVolume + "-" + i).collect(Collectors.joining());
+    }
+    if (IS_REDIS_SENTINEL) {
+        return " " + redisSentinelDataVolume + "-master" +
+                " " + redisSentinelDataVolume + "-slave" +
+                " " + redisSentinelDataVolume + "-sentinel";
+    }
+    return " " + redisDataVolume;
+}
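To make the sentinel branch concrete (illustration only; the "project" prefix below is an assumed Docker Compose project name, not something fixed by the diff), the string appended to the "volume rm -f" command would look like this:

    String redisSentinelDataVolume = "project_tb-redis-sentinel-data-volume"; // project + "_" + REDIS_SENTINEL_DATA_VOLUME
    String volumeArgs = " " + redisSentinelDataVolume + "-master" +
            " " + redisSentinelDataVolume + "-slave" +
            " " + redisSentinelDataVolume + "-sentinel";
    // -> " project_tb-redis-sentinel-data-volume-master project_tb-redis-sentinel-data-volume-slave project_tb-redis-sentinel-data-volume-sentinel"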
 private void copyLogs(String volumeName, String targetDir) {
 File tbLogsDir = new File(targetDir);
 tbLogsDir.mkdirs();

View File

@@ -46,7 +46,7 @@ cache:
 type: "${CACHE_TYPE:redis}"
 redis:
-# standalone or cluster
+# standalone or cluster or sentinel
 connection:
 type: "${REDIS_CONNECTION_TYPE:standalone}"
 standalone:
@@ -66,6 +66,16 @@
 nodes: "${REDIS_NODES:}"
 # Maximum number of redirects to follow when executing commands across the cluster.
 max-redirects: "${REDIS_MAX_REDIRECTS:12}"
+# if set false will be used pool config build from values of the pool config section
+useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:true}"
+sentinel:
+# name of master node
+master: "${REDIS_MASTER:}"
+# comma-separated list of "host:port" pairs of sentinels
+sentinels: "${REDIS_SENTINELS:}"
+# password to authenticate with sentinel
+password: "${REDIS_SENTINEL_PASSWORD:}"
+# if set false will be used pool config build from values of the pool config section
 useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:true}"
 # db index
 db: "${REDIS_DB:0}"

View File

@@ -73,7 +73,7 @@ cache:
 type: "${CACHE_TYPE:redis}"
 redis:
-# standalone or cluster
+# standalone or cluster or sentinel
 connection:
 type: "${REDIS_CONNECTION_TYPE:standalone}"
 standalone:
@@ -93,6 +93,16 @@
 nodes: "${REDIS_NODES:}"
 # Maximum number of redirects to follow when executing commands across the cluster.
 max-redirects: "${REDIS_MAX_REDIRECTS:12}"
+# if set false will be used pool config build from values of the pool config section
+useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:true}"
+sentinel:
+# name of master node
+master: "${REDIS_MASTER:}"
+# comma-separated list of "host:port" pairs of sentinels
+sentinels: "${REDIS_SENTINELS:}"
+# password to authenticate with sentinel
+password: "${REDIS_SENTINEL_PASSWORD:}"
+# if set false will be used pool config build from values of the pool config section
 useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:true}"
 # db index
 db: "${REDIS_DB:0}"

View File

@@ -46,7 +46,7 @@ cache:
 type: "${CACHE_TYPE:redis}"
 redis:
-# standalone or cluster
+# standalone or cluster or sentinel
 connection:
 type: "${REDIS_CONNECTION_TYPE:standalone}"
 standalone:
@@ -66,6 +66,16 @@
 nodes: "${REDIS_NODES:}"
 # Maximum number of redirects to follow when executing commands across the cluster.
 max-redirects: "${REDIS_MAX_REDIRECTS:12}"
+# if set false will be used pool config build from values of the pool config section
+useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:true}"
+sentinel:
+# name of master node
+master: "${REDIS_MASTER:}"
+# comma-separated list of "host:port" pairs of sentinels
+sentinels: "${REDIS_SENTINELS:}"
+# password to authenticate with sentinel
+password: "${REDIS_SENTINEL_PASSWORD:}"
+# if set false will be used pool config build from values of the pool config section
 useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:true}"
 # db index
 db: "${REDIS_DB:0}"

View File

@@ -46,7 +46,7 @@ cache:
 type: "${CACHE_TYPE:redis}"
 redis:
-# standalone or cluster
+# standalone or cluster or sentinel
 connection:
 type: "${REDIS_CONNECTION_TYPE:standalone}"
 standalone:
@@ -66,6 +66,16 @@
 nodes: "${REDIS_NODES:}"
 # Maximum number of redirects to follow when executing commands across the cluster.
 max-redirects: "${REDIS_MAX_REDIRECTS:12}"
+# if set false will be used pool config build from values of the pool config section
+useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:true}"
+sentinel:
+# name of master node
+master: "${REDIS_MASTER:}"
+# comma-separated list of "host:port" pairs of sentinels
+sentinels: "${REDIS_SENTINELS:}"
+# password to authenticate with sentinel
+password: "${REDIS_SENTINEL_PASSWORD:}"
+# if set false will be used pool config build from values of the pool config section
 useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:true}"
 # db index
 db: "${REDIS_DB:0}"

View File

@@ -46,7 +46,7 @@ cache:
 type: "${CACHE_TYPE:redis}"
 redis:
-# standalone or cluster
+# standalone or cluster or sentinel
 connection:
 type: "${REDIS_CONNECTION_TYPE:standalone}"
 standalone:
@@ -66,6 +66,16 @@
 nodes: "${REDIS_NODES:}"
 # Maximum number of redirects to follow when executing commands across the cluster.
 max-redirects: "${REDIS_MAX_REDIRECTS:12}"
+# if set false will be used pool config build from values of the pool config section
+useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:true}"
+sentinel:
+# name of master node
+master: "${REDIS_MASTER:}"
+# comma-separated list of "host:port" pairs of sentinels
+sentinels: "${REDIS_SENTINELS:}"
+# password to authenticate with sentinel
+password: "${REDIS_SENTINEL_PASSWORD:}"
+# if set false will be used pool config build from values of the pool config section
 useDefaultPoolConfig: "${REDIS_USE_DEFAULT_POOL_CONFIG:true}"
 # db index
 db: "${REDIS_DB:0}"