Cherry-pick bfdd52c from fix/ttlCleanUpServices and add the upgrade from version 3.1.1

Authored by Dmytro Shvaika on 2020-09-01 19:57:57 +03:00, committed by Andrew Shvayka
parent 39f0d92e15
commit f647c69f50
10 changed files with 32 additions and 18 deletions

View File

@@ -64,6 +64,7 @@ BEGIN
         AND tablename like 'ts_kv_' || '%'
         AND tablename != 'ts_kv_latest'
         AND tablename != 'ts_kv_dictionary'
+        AND tablename != 'ts_kv_indefinite'
     LOOP
         IF partition != partition_by_max_ttl_date THEN
             IF partition_year IS NOT NULL THEN

View File

@@ -175,6 +175,11 @@ public class ThingsboardInstallService {
             case "3.1.0":
                 log.info("Upgrading ThingsBoard from version 3.1.0 to 3.1.1 ...");
                 databaseEntitiesUpgradeService.upgradeDatabase("3.1.0");
+            case "3.1.1":
+                log.info("Upgrading ThingsBoard from version 3.1.1 to 3.1.2 ...");
+                if (databaseTsUpgradeService != null) {
+                    databaseTsUpgradeService.upgradeDatabase("3.1.1");
+                }
                 log.info("Updating system data...");
                 systemDataLoaderService.updateSystemWidgets();
                 break;
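
The new case relies on the switch's fall-through: an install starting at "3.1.0" runs the 3.1.0 step and then drops straight into the 3.1.1 step before the shared system-data update. A minimal, self-contained sketch of that pattern follows; the service calls from the diff are left as comments, and the class and method names here are placeholders, not ThingsBoard APIs:

// Sketch only: demonstrates the intentional fall-through used by the upgrade switch.
public class UpgradeChainSketch {

    public static void main(String[] args) {
        upgrade("3.1.0");   // prints the 3.1.0 step, then falls through into 3.1.1
    }

    static void upgrade(String fromVersion) {
        switch (fromVersion) {
            case "3.1.0":
                System.out.println("Upgrading ThingsBoard from version 3.1.0 to 3.1.1 ...");
                // databaseEntitiesUpgradeService.upgradeDatabase("3.1.0");
                // no break: continue with the next upgrade step
            case "3.1.1":
                System.out.println("Upgrading ThingsBoard from version 3.1.1 to 3.1.2 ...");
                // if (databaseTsUpgradeService != null) {
                //     databaseTsUpgradeService.upgradeDatabase("3.1.1");
                // }
                System.out.println("Updating system data...");
                break;
            default:
                throw new RuntimeException("Unsupported fromVersion: " + fromVersion);
        }
    }
}

The null check on databaseTsUpgradeService suggests the timeseries upgrade service is optional in some configurations, so the rest of the 3.1.1 step still runs when it is absent.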

View File

@@ -49,6 +49,7 @@ public class CassandraTsDatabaseUpgradeService extends AbstractCassandraDatabase
                 log.info("Schema updated.");
                 break;
             case "2.5.0":
+            case "3.1.1":
                 break;
             default:
                 throw new RuntimeException("Unable to upgrade Cassandra database, unsupported fromVersion: " + fromVersion);

View File

@@ -195,6 +195,12 @@ public class PsqlTsDatabaseUpgradeService extends AbstractSqlTsDatabaseUpgradeSe
                     executeQuery(conn, "UPDATE tb_schema_settings SET schema_version = 2005001");
                 }
                 break;
+            case "3.1.1":
+                try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
+                    log.info("Load Drop Partitions functions ...");
+                    loadSql(conn, LOAD_DROP_PARTITIONS_FUNCTIONS_SQL);
+                }
+                break;
             default:
                 throw new RuntimeException("Unable to upgrade SQL database, unsupported fromVersion: " + fromVersion);
         }

View File

@@ -177,6 +177,8 @@ public class TimescaleTsDatabaseUpgradeService extends AbstractSqlTsDatabaseUpgr
                     executeQuery(conn, "UPDATE tb_schema_settings SET schema_version = 2005001");
                 }
                 break;
+            case "3.1.1":
+                break;
             default:
                 throw new RuntimeException("Unable to upgrade SQL database, unsupported fromVersion: " + fromVersion);
         }

View File

@@ -38,19 +38,15 @@ public abstract class AbstractCleanUpService {
     @Value("${spring.datasource.password}")
     protected String dbPassword;
 
-    protected long executeQuery(Connection conn, String query) {
-        long removed = 0L;
-        try {
-            Statement statement = conn.createStatement();
+    protected long executeQuery(Connection conn, String query) throws SQLException {
+        try (Statement statement = conn.createStatement()) {
             ResultSet resultSet = statement.executeQuery(query);
-            getWarnings(statement);
+            if (log.isDebugEnabled()) {
+                getWarnings(statement);
+            }
             resultSet.next();
-            removed = resultSet.getLong(1);
-            log.debug("Successfully executed query: {}", query);
-        } catch (SQLException e) {
-            log.debug("Failed to execute query: {} due to: {}", query, e.getMessage());
+            return resultSet.getLong(1);
         }
-        return removed;
     }
 
     protected void getWarnings(Statement statement) throws SQLException {
@@ -65,6 +61,6 @@ public abstract class AbstractCleanUpService {
         }
     }
 
-    protected abstract void doCleanUp(Connection connection);
+    protected abstract void doCleanUp(Connection connection) throws SQLException;
 
 }
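
Pieced together from the hunks above, executeQuery after this commit reads roughly as follows (a sketch; the log field is the class's existing logger). The behavioural changes are that SQL errors now propagate as SQLException instead of being logged at debug level and swallowed, the Statement is closed via try-with-resources, and driver warnings are fetched only when debug logging is enabled:

protected long executeQuery(Connection conn, String query) throws SQLException {
    // Statement is closed automatically; any SQLException reaches the caller.
    try (Statement statement = conn.createStatement()) {
        ResultSet resultSet = statement.executeQuery(query);
        if (log.isDebugEnabled()) {
            getWarnings(statement);   // only fetch driver warnings when debugging
        }
        resultSet.next();
        return resultSet.getLong(1);  // the cleanup procedures report a single count
    }
}

Because executeQuery no longer catches SQLException, the doCleanUp implementations in the hunks below now declare throws SQLException as well.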

View File

@@ -52,7 +52,7 @@ public class EventsCleanUpService extends AbstractCleanUpService {
     }
 
     @Override
-    protected void doCleanUp(Connection connection) {
+    protected void doCleanUp(Connection connection) throws SQLException {
         long totalEventsRemoved = executeQuery(connection, "call cleanup_events_by_ttl(" + ttl + ", " + debugTtl + ", 0);");
         log.info("Total events removed by TTL: [{}]", totalEventsRemoved);
     }

View File

@@ -23,6 +23,7 @@ import org.thingsboard.server.dao.util.PsqlDao;
 import org.thingsboard.server.dao.util.SqlTsDao;
 
 import java.sql.Connection;
+import java.sql.SQLException;
 
 @SqlTsDao
 @PsqlDao
@@ -34,10 +35,10 @@ public class PsqlTimeseriesCleanUpService extends AbstractTimeseriesCleanUpServi
     private String partitionType;
 
     @Override
-    protected void doCleanUp(Connection connection) {
+    protected void doCleanUp(Connection connection) throws SQLException {
         long totalPartitionsRemoved = executeQuery(connection, "call drop_partitions_by_max_ttl('" + partitionType + "'," + systemTtl + ", 0);");
         log.info("Total partitions removed by TTL: [{}]", totalPartitionsRemoved);
         long totalEntitiesTelemetryRemoved = executeQuery(connection, "call cleanup_timeseries_by_ttl('" + ModelConstants.NULL_UUID + "'," + systemTtl + ", 0);");
         log.info("Total telemetry removed stats by TTL for entities: [{}]", totalEntitiesTelemetryRemoved);
     }
 }

View File

@@ -21,6 +21,7 @@ import org.thingsboard.server.dao.model.ModelConstants;
 import org.thingsboard.server.dao.util.TimescaleDBTsDao;
 
 import java.sql.Connection;
+import java.sql.SQLException;
 
 @TimescaleDBTsDao
 @Service
@@ -28,7 +29,7 @@ import java.sql.Connection;
 public class TimescaleTimeseriesCleanUpService extends AbstractTimeseriesCleanUpService {
 
     @Override
-    protected void doCleanUp(Connection connection) {
+    protected void doCleanUp(Connection connection) throws SQLException {
         long totalEntitiesTelemetryRemoved = executeQuery(connection, "call cleanup_timeseries_by_ttl('" + ModelConstants.NULL_UUID + "'," + systemTtl + ", 0);");
         log.info("Total telemetry removed stats by TTL for entities: [{}]", totalEntitiesTelemetryRemoved);
     }

View File

@@ -84,6 +84,7 @@ BEGIN
         AND tablename like 'ts_kv_' || '%'
         AND tablename != 'ts_kv_latest'
         AND tablename != 'ts_kv_dictionary'
+        AND tablename != 'ts_kv_indefinite'
     LOOP
         IF partition != partition_by_max_ttl_date THEN
             IF partition_year IS NOT NULL THEN