Browse Source

Merge branch 'master' into feature/push-notifications

# Conflicts:
#	application/src/main/java/org/thingsboard/server/controller/BaseController.java
pull/8522/head^2
ViacheslavKlimov 2 years ago
parent
commit
4ff3f7851c
  1. 18
      .github/ISSUE_TEMPLATE/---bug-report.md
  2. 6
      .github/ISSUE_TEMPLATE/feature_request.md
  3. 10
      .github/ISSUE_TEMPLATE/question.md
  4. 54
      .github/workflows/license-header-format.yml
  5. 15
      application/src/main/data/json/system/widget_bundles/buttons.json
  6. 3
      application/src/main/data/json/system/widget_bundles/control_widgets.json
  7. 29
      application/src/main/data/json/system/widget_types/action_button.json
  8. 39
      application/src/main/data/json/system/widget_types/command_button.json
  9. 36
      application/src/main/data/json/system/widget_types/power_button.json
  10. 3
      application/src/main/data/json/system/widget_types/progress_bar.json
  11. 3
      application/src/main/data/json/system/widget_types/simple_value_and_chart_card.json
  12. 11
      application/src/main/data/json/system/widget_types/single_switch.json
  13. 39
      application/src/main/data/json/system/widget_types/slider.json
  14. 36
      application/src/main/data/json/tenant/dashboards/gateways.json
  15. 187
      application/src/main/data/upgrade/1.3.0/schema_update.cql
  16. 17
      application/src/main/data/upgrade/1.3.1/schema_update.sql
  17. 112
      application/src/main/data/upgrade/1.4.0/schema_update.cql
  18. 41
      application/src/main/data/upgrade/1.4.0/schema_update.sql
  19. 103
      application/src/main/data/upgrade/2.0.0/schema_update.cql
  20. 44
      application/src/main/data/upgrade/2.0.0/schema_update.sql
  21. 74
      application/src/main/data/upgrade/2.1.1/schema_update.cql
  22. 29
      application/src/main/data/upgrade/2.1.1/schema_update.sql
  23. 110
      application/src/main/data/upgrade/2.1.2/schema_update.cql
  24. 32
      application/src/main/data/upgrade/2.1.2/schema_update.sql
  25. 19
      application/src/main/data/upgrade/2.2.0/schema_update.sql
  26. 17
      application/src/main/data/upgrade/2.3.1/schema_update.sql
  27. 23
      application/src/main/data/upgrade/2.4.0/schema_update.sql
  28. 31
      application/src/main/data/upgrade/2.4.2/schema_update.sql
  29. 209
      application/src/main/data/upgrade/2.4.3/schema_update_psql_drop_partitions.sql
  30. 359
      application/src/main/data/upgrade/2.4.3/schema_update_psql_ts.sql
  31. 208
      application/src/main/data/upgrade/2.4.3/schema_update_timescale_ts.sql
  32. 150
      application/src/main/data/upgrade/2.4.3/schema_update_ttl.sql
  33. 878
      application/src/main/data/upgrade/3.0.1/schema_update_to_uuid.sql
  34. 17
      application/src/main/data/upgrade/3.1.0/schema_update.sql
  35. 28
      application/src/main/data/upgrade/3.1.1/schema_update_after.sql
  36. 154
      application/src/main/data/upgrade/3.1.1/schema_update_before.sql
  37. 23
      application/src/main/data/upgrade/3.2.1/schema_update.sql
  38. 87
      application/src/main/data/upgrade/3.2.1/schema_update_ttl.sql
  39. 216
      application/src/main/data/upgrade/3.2.2/schema_update.sql
  40. 90
      application/src/main/data/upgrade/3.2.2/schema_update_event.sql
  41. 32
      application/src/main/data/upgrade/3.2.2/schema_update_ttl.sql
  42. 71
      application/src/main/data/upgrade/3.3.2/schema_update.sql
  43. 213
      application/src/main/data/upgrade/3.3.2/schema_update_lwm2m_bootstrap.sql
  44. 50
      application/src/main/data/upgrade/3.3.3/schema_event_ttl_procedure.sql
  45. 29
      application/src/main/data/upgrade/3.3.3/schema_update.sql
  46. 140
      application/src/main/data/upgrade/3.3.4/schema_update.sql
  47. 234
      application/src/main/data/upgrade/3.4.0/schema_update.sql
  48. 142
      application/src/main/data/upgrade/3.4.1/schema_update.sql
  49. 21
      application/src/main/data/upgrade/3.4.1/schema_update_after.sql
  50. 46
      application/src/main/data/upgrade/3.4.1/schema_update_before.sql
  51. 379
      application/src/main/data/upgrade/3.4.4/schema_update.sql
  52. 14
      application/src/main/java/org/thingsboard/server/actors/device/DeviceActorMessageProcessor.java
  53. 2
      application/src/main/java/org/thingsboard/server/actors/ruleChain/RuleChainActorMessageProcessor.java
  54. 2
      application/src/main/java/org/thingsboard/server/config/RateLimitProcessingFilter.java
  55. 2
      application/src/main/java/org/thingsboard/server/controller/AuthController.java
  56. 15
      application/src/main/java/org/thingsboard/server/controller/BaseController.java
  57. 2
      application/src/main/java/org/thingsboard/server/controller/plugin/TbWebSocketHandler.java
  58. 156
      application/src/main/java/org/thingsboard/server/install/ThingsboardInstallService.java
  59. 4
      application/src/main/java/org/thingsboard/server/service/edge/EdgeContextComponent.java
  60. 28
      application/src/main/java/org/thingsboard/server/service/edge/rpc/EdgeGrpcService.java
  61. 50
      application/src/main/java/org/thingsboard/server/service/edge/rpc/EdgeGrpcSession.java
  62. 2
      application/src/main/java/org/thingsboard/server/service/edge/rpc/processor/alarm/BaseAlarmProcessor.java
  63. 5
      application/src/main/java/org/thingsboard/server/service/edge/rpc/processor/rule/RuleChainEdgeProcessor.java
  64. 115
      application/src/main/java/org/thingsboard/server/service/entitiy/queue/DefaultTbQueueService.java
  65. 24
      application/src/main/java/org/thingsboard/server/service/install/CassandraTsDatabaseUpgradeService.java
  66. 114
      application/src/main/java/org/thingsboard/server/service/install/DatabaseHelper.java
  67. 21
      application/src/main/java/org/thingsboard/server/service/install/DefaultSystemDataLoaderService.java
  68. 691
      application/src/main/java/org/thingsboard/server/service/install/SqlDatabaseUpgradeService.java
  69. 209
      application/src/main/java/org/thingsboard/server/service/install/SqlTsDatabaseUpgradeService.java
  70. 2
      application/src/main/java/org/thingsboard/server/service/install/SystemDataLoaderService.java
  71. 49
      application/src/main/java/org/thingsboard/server/service/install/TbRuleEngineQueueConfigService.java
  72. 162
      application/src/main/java/org/thingsboard/server/service/install/TimescaleTsDatabaseUpgradeService.java
  73. 218
      application/src/main/java/org/thingsboard/server/service/install/cql/CassandraDbHelper.java
  74. 327
      application/src/main/java/org/thingsboard/server/service/install/migrate/CassandraEntitiesToSqlMigrateService.java
  75. 4
      application/src/main/java/org/thingsboard/server/service/install/migrate/CassandraTsLatestToSqlMigrateService.java
  76. 176
      application/src/main/java/org/thingsboard/server/service/install/sql/SqlDbHelper.java
  77. 41
      application/src/main/java/org/thingsboard/server/service/install/update/DefaultCacheCleanupService.java
  78. 572
      application/src/main/java/org/thingsboard/server/service/install/update/DefaultDataUpdateService.java
  79. 115
      application/src/main/java/org/thingsboard/server/service/install/update/RateLimitsUpdater.java
  80. 2
      application/src/main/java/org/thingsboard/server/service/notification/DefaultNotificationCenter.java
  81. 2
      application/src/main/java/org/thingsboard/server/service/notification/rule/DefaultNotificationRuleProcessor.java
  82. 62
      application/src/main/java/org/thingsboard/server/service/notification/rule/trigger/EdgeCommunicationFailureTriggerProcessor.java
  83. 60
      application/src/main/java/org/thingsboard/server/service/notification/rule/trigger/EdgeConnectionTriggerProcessor.java
  84. 64
      application/src/main/java/org/thingsboard/server/service/queue/DefaultTbClusterService.java
  85. 10
      application/src/main/java/org/thingsboard/server/service/queue/DefaultTbCoreConsumerService.java
  86. 71
      application/src/main/java/org/thingsboard/server/service/queue/DefaultTbRuleEngineConsumerService.java
  87. 4
      application/src/main/java/org/thingsboard/server/service/queue/TbCoreConsumerStats.java
  88. 1
      application/src/main/java/org/thingsboard/server/service/queue/TbMsgPackCallback.java
  89. 12
      application/src/main/java/org/thingsboard/server/service/queue/processing/AbstractTbRuleEngineSubmitStrategy.java
  90. 2
      application/src/main/java/org/thingsboard/server/service/security/auth/mfa/DefaultTwoFactorAuthService.java
  91. 8
      application/src/main/java/org/thingsboard/server/service/subscription/DefaultSubscriptionManagerService.java
  92. 2
      application/src/main/java/org/thingsboard/server/service/sync/ie/DefaultEntitiesExportImportService.java
  93. 15
      application/src/main/java/org/thingsboard/server/service/sync/ie/exporting/impl/NotificationRuleExportService.java
  94. 15
      application/src/main/java/org/thingsboard/server/service/sync/ie/importing/impl/NotificationRuleImportService.java
  95. 60
      application/src/main/resources/thingsboard.yml
  96. 20
      application/src/test/java/org/thingsboard/server/controller/AbstractNotifyEntityTest.java
  97. 37
      application/src/test/java/org/thingsboard/server/edge/DeviceEdgeTest.java
  98. 23
      application/src/test/java/org/thingsboard/server/edge/RuleChainEdgeTest.java
  99. 7
      application/src/test/java/org/thingsboard/server/edge/imitator/EdgeImitator.java
  100. 6
      application/src/test/java/org/thingsboard/server/queue/discovery/HashPartitionServiceTest.java

18
.github/ISSUE_TEMPLATE/---bug-report.md

@ -1,9 +1,9 @@
---
name: "\U0001F41E Bug report"
about: Create a report to help us improve
title: "[Bug] "
labels: bug
assignees: ashvayka, vvlladd28
title: "Your title here"
labels: ['bug', 'unconfirmed']
assignees: AndriichnekoDm
---
@ -12,11 +12,13 @@ A clear and concise description of what the bug is.
**Your Server Environment**
<!-- 🔅🔅🔅🔅🔅🔅🔅 Choose one of the following or write your own 🔅🔅🔅🔅🔅🔅🔅-->
* demo.thingsboard.io
* cloud.thingsboard.io
* [https://demo.thingsboard.io](demo.thingsboard.io)
* [https://thingsboard.cloud](thingsboard.cloud)
* own setup
* cloud or local infrastructure or docker deployment
* Deployment: monolith or microservices
* Deployment type: deb, rpm, exe, docker-compose, k8s, ami
* ThingsBoard Version
* Community or Professional Edition
* OS Name and Version
**Your Client Environment**
@ -54,7 +56,7 @@ Steps to reproduce the behavior:
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
If applicable, please add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.
Please feel free to add any other context about the problem here.

6
.github/ISSUE_TEMPLATE/feature_request.md

@ -1,9 +1,9 @@
---
name: Feature request
about: Suggest an idea for this project
title: "[Feature Request]"
labels: feature
assignees: ikulikov
title: "Your title here"
labels: ['feature']
assignees: 'AndriichnekoDm'
---

10
.github/ISSUE_TEMPLATE/question.md

@ -1,9 +1,9 @@
---
name: Question
about: Describe your questions in details
title: "[Question] your title here"
labels: question
assignees: ashvayka
about: Describe your questions in detail
title: "Your title here"
labels: ['question']
assignees: 'AndriichnekoDm'
---
@ -16,7 +16,7 @@ assignees: ashvayka
* Generic
**Description**
A clear and concise details.
Clear and concise details.
**Environment**
<!-- Add information about your environment and ThingsBoard version if applicable -->

54
.github/workflows/license-header-format.yml

@ -0,0 +1,54 @@
#
# Copyright © 2016-2024 The Thingsboard Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
name: License header format
on:
push:
branches:
- 'master'
- 'develop/3*'
- 'hotfix/3*'
jobs:
license-format:
runs-on: ubuntu-latest
steps:
- name: Checkout Repository
uses: actions/checkout@v4
- name: Set up JDK
uses: actions/setup-java@v4
with:
distribution: 'corretto' # https://github.com/actions/setup-java?tab=readme-ov-file#supported-distributions
java-version: '21'
cache: 'maven' # https://github.com/actions/setup-java?tab=readme-ov-file#caching-sbt-dependencies
- name: License header format
run: mvn -T 1C license:format
- name: License header format (msa/black-box-tests/)
run: mvn -T 1C license:format -f msa/black-box-tests/
- name: Set Git user information
run: |
git config user.name "ThingsBoard Bot"
git config user.email "noreply@thingsboard.io"
- name: Check and push changes
run: |
git diff --exit-code || git commit -am "License header format" && git push

15
application/src/main/data/json/system/widget_bundles/buttons.json

File diff suppressed because one or more lines are too long

3
application/src/main/data/json/system/widget_bundles/control_widgets.json

@ -9,6 +9,9 @@
},
"widgetTypeFqns": [
"single_switch",
"command_button",
"power_button",
"slider",
"control_widgets.switch_control",
"control_widgets.slide_toggle_control",
"control_widgets.round_switch",

29
application/src/main/data/json/system/widget_types/action_button.json

File diff suppressed because one or more lines are too long

39
application/src/main/data/json/system/widget_types/command_button.json

File diff suppressed because one or more lines are too long

36
application/src/main/data/json/system/widget_types/power_button.json

File diff suppressed because one or more lines are too long

3
application/src/main/data/json/system/widget_types/progress_bar.json

File diff suppressed because one or more lines are too long

3
application/src/main/data/json/system/widget_types/simple_value_and_chart_card.json

File diff suppressed because one or more lines are too long

11
application/src/main/data/json/system/widget_types/single_switch.json

File diff suppressed because one or more lines are too long

39
application/src/main/data/json/system/widget_types/slider.json

File diff suppressed because one or more lines are too long

36
application/src/main/data/json/tenant/dashboards/gateways.json

@ -2317,7 +2317,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "45e4507d-3adc-bb31-8b2b-1ba09bbd56ac"
@ -2484,7 +2484,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "852eccce-98eb-24db-c783-bdd62566f906"
@ -2650,7 +2650,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "3c31ba62-e760-2bea-4c8d-d32784a86c24"
@ -2816,7 +2816,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "4b55ea81-93bf-4206-9166-3e0bdc1dd9f3"
@ -2982,7 +2982,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "babf88d0-a118-e2b5-f10e-3a5970c8a65b"
@ -3148,7 +3148,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "94de7690-f91d-b032-6771-85af99abd749"
@ -3314,7 +3314,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "18414f44-1c65-536a-14de-eaf21a7d56bd"
@ -3480,7 +3480,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "794974da-c9d2-a9f7-be47-c9eb642094e8"
@ -3646,7 +3646,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "2add705b-3e53-8559-8126-380cac686fb0"
@ -3812,7 +3812,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "7e1ba820-9992-d52a-579b-20485abb3926"
@ -3978,7 +3978,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "91af27c1-b37c-2276-6022-a332e41b2b33"
@ -4144,7 +4144,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "26cf8696-054b-13ec-7984-6fc5df20e6f1"
@ -4310,7 +4310,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "1dcfaf24-32be-cd19-62d6-86d12cc6a7ef"
@ -4476,7 +4476,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "ad2bc817-f3c4-150c-4672-8fe0c38aee8d"
@ -4642,7 +4642,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "d1ad84cd-bd9c-4dca-e4a0-f444ae8598bd"
@ -4808,7 +4808,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "bf80eef9-b879-9a08-40a4-488dbdefa125"
@ -4974,7 +4974,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "b5a406b3-cc0a-8a09-9aec-3f8befae5fb8"
@ -5140,7 +5140,7 @@
"useShowWidgetActionFunction": null,
"showWidgetActionFunction": "return true;",
"type": "custom",
"customFunction": "const url = `${window.location.origin}/entities/devices/${entityId.id}`;\nwindow.open(url, '_blank');",
"customFunction": "const url = `${window.location.origin + widgetContext.utils.getEntityDetailsPageURL(entityId.id, entityId.entityType)}`;\nwindow.open(url, '_blank');",
"openInSeparateDialog": false,
"openInPopover": false,
"id": "ec1dfba3-4b43-2491-8948-f602337f8a3b"

187
application/src/main/data/upgrade/1.3.0/schema_update.cql

@ -1,187 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
DROP MATERIALIZED VIEW IF EXISTS thingsboard.device_by_tenant_and_name;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.device_by_tenant_and_search_text;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.device_by_tenant_by_type_and_search_text;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.device_by_customer_and_search_text;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.device_by_customer_by_type_and_search_text;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.device_types_by_tenant;
DROP TABLE IF EXISTS thingsboard.device;
CREATE TABLE IF NOT EXISTS thingsboard.device (
id timeuuid,
tenant_id timeuuid,
customer_id timeuuid,
name text,
type text,
search_text text,
additional_info text,
PRIMARY KEY (id, tenant_id, customer_id, type)
);
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.device_by_tenant_and_name AS
SELECT *
from thingsboard.device
WHERE tenant_id IS NOT NULL AND customer_id IS NOT NULL AND type IS NOT NULL AND name IS NOT NULL AND id IS NOT NULL
PRIMARY KEY ( tenant_id, name, id, customer_id, type)
WITH CLUSTERING ORDER BY ( name ASC, id DESC, customer_id DESC);
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.device_by_tenant_and_search_text AS
SELECT *
from thingsboard.device
WHERE tenant_id IS NOT NULL AND customer_id IS NOT NULL AND type IS NOT NULL AND search_text IS NOT NULL AND id IS NOT NULL
PRIMARY KEY ( tenant_id, search_text, id, customer_id, type)
WITH CLUSTERING ORDER BY ( search_text ASC, id DESC, customer_id DESC);
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.device_by_tenant_by_type_and_search_text AS
SELECT *
from thingsboard.device
WHERE tenant_id IS NOT NULL AND customer_id IS NOT NULL AND type IS NOT NULL AND search_text IS NOT NULL AND id IS NOT NULL
PRIMARY KEY ( tenant_id, type, search_text, id, customer_id)
WITH CLUSTERING ORDER BY ( type ASC, search_text ASC, id DESC, customer_id DESC);
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.device_by_customer_and_search_text AS
SELECT *
from thingsboard.device
WHERE tenant_id IS NOT NULL AND customer_id IS NOT NULL AND type IS NOT NULL AND search_text IS NOT NULL AND id IS NOT NULL
PRIMARY KEY ( customer_id, tenant_id, search_text, id, type )
WITH CLUSTERING ORDER BY ( tenant_id DESC, search_text ASC, id DESC );
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.device_by_customer_by_type_and_search_text AS
SELECT *
from thingsboard.device
WHERE tenant_id IS NOT NULL AND customer_id IS NOT NULL AND type IS NOT NULL AND search_text IS NOT NULL AND id IS NOT NULL
PRIMARY KEY ( customer_id, tenant_id, type, search_text, id )
WITH CLUSTERING ORDER BY ( tenant_id DESC, type ASC, search_text ASC, id DESC );
DROP MATERIALIZED VIEW IF EXISTS thingsboard.asset_by_tenant_and_name;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.asset_by_tenant_and_search_text;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.asset_by_tenant_by_type_and_search_text;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.asset_by_customer_and_search_text;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.asset_by_customer_by_type_and_search_text;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.asset_types_by_tenant;
DROP TABLE IF EXISTS thingsboard.asset;
CREATE TABLE IF NOT EXISTS thingsboard.asset (
id timeuuid,
tenant_id timeuuid,
customer_id timeuuid,
name text,
type text,
search_text text,
additional_info text,
PRIMARY KEY (id, tenant_id, customer_id, type)
);
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.asset_by_tenant_and_name AS
SELECT *
from thingsboard.asset
WHERE tenant_id IS NOT NULL AND customer_id IS NOT NULL AND type IS NOT NULL AND name IS NOT NULL AND id IS NOT NULL
PRIMARY KEY ( tenant_id, name, id, customer_id, type)
WITH CLUSTERING ORDER BY ( name ASC, id DESC, customer_id DESC);
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.asset_by_tenant_and_search_text AS
SELECT *
from thingsboard.asset
WHERE tenant_id IS NOT NULL AND customer_id IS NOT NULL AND type IS NOT NULL AND search_text IS NOT NULL AND id IS NOT NULL
PRIMARY KEY ( tenant_id, search_text, id, customer_id, type)
WITH CLUSTERING ORDER BY ( search_text ASC, id DESC, customer_id DESC);
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.asset_by_tenant_by_type_and_search_text AS
SELECT *
from thingsboard.asset
WHERE tenant_id IS NOT NULL AND customer_id IS NOT NULL AND type IS NOT NULL AND search_text IS NOT NULL AND id IS NOT NULL
PRIMARY KEY ( tenant_id, type, search_text, id, customer_id)
WITH CLUSTERING ORDER BY ( type ASC, search_text ASC, id DESC, customer_id DESC);
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.asset_by_customer_and_search_text AS
SELECT *
from thingsboard.asset
WHERE tenant_id IS NOT NULL AND customer_id IS NOT NULL AND type IS NOT NULL AND search_text IS NOT NULL AND id IS NOT NULL
PRIMARY KEY ( customer_id, tenant_id, search_text, id, type )
WITH CLUSTERING ORDER BY ( tenant_id DESC, search_text ASC, id DESC );
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.asset_by_customer_by_type_and_search_text AS
SELECT *
from thingsboard.asset
WHERE tenant_id IS NOT NULL AND customer_id IS NOT NULL AND type IS NOT NULL AND search_text IS NOT NULL AND id IS NOT NULL
PRIMARY KEY ( customer_id, tenant_id, type, search_text, id )
WITH CLUSTERING ORDER BY ( tenant_id DESC, type ASC, search_text ASC, id DESC );
CREATE TABLE IF NOT EXISTS thingsboard.entity_subtype (
tenant_id timeuuid,
entity_type text, // (DEVICE, ASSET)
type text,
PRIMARY KEY (tenant_id, entity_type, type)
);
CREATE TABLE IF NOT EXISTS thingsboard.alarm (
id timeuuid,
tenant_id timeuuid,
type text,
originator_id timeuuid,
originator_type text,
severity text,
status text,
start_ts bigint,
end_ts bigint,
ack_ts bigint,
clear_ts bigint,
details text,
propagate boolean,
PRIMARY KEY ((tenant_id, originator_id, originator_type), type, id)
) WITH CLUSTERING ORDER BY ( type ASC, id DESC);
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.alarm_by_id AS
SELECT *
from thingsboard.alarm
WHERE tenant_id IS NOT NULL AND originator_id IS NOT NULL AND originator_type IS NOT NULL AND type IS NOT NULL
AND type IS NOT NULL AND id IS NOT NULL
PRIMARY KEY (id, tenant_id, originator_id, originator_type, type)
WITH CLUSTERING ORDER BY ( tenant_id ASC, originator_id ASC, originator_type ASC, type ASC);
-- Rebuild the relation table and its materialized views. Views must be
-- dropped before the base table; existing relation data is discarded.
DROP MATERIALIZED VIEW IF EXISTS thingsboard.relation_by_type_and_child_type;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.reverse_relation;
DROP TABLE IF EXISTS thingsboard.relation;
-- Forward relations, partitioned by the source entity (from_id, from_type).
CREATE TABLE IF NOT EXISTS thingsboard.relation (
from_id timeuuid,
from_type text,
to_id timeuuid,
to_type text,
relation_type_group text,
relation_type text,
additional_info text,
PRIMARY KEY ((from_id, from_type), relation_type_group, relation_type, to_id, to_type)
) WITH CLUSTERING ORDER BY ( relation_type_group ASC, relation_type ASC, to_id ASC, to_type ASC);
-- Same data, but clustered with to_type ahead of to_id so queries can
-- filter forward relations by the child entity's type.
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.relation_by_type_and_child_type AS
SELECT *
from thingsboard.relation
WHERE from_id IS NOT NULL AND from_type IS NOT NULL AND relation_type_group IS NOT NULL AND relation_type IS NOT NULL AND to_id IS NOT NULL AND to_type IS NOT NULL
PRIMARY KEY ((from_id, from_type), relation_type_group, relation_type, to_type, to_id)
WITH CLUSTERING ORDER BY ( relation_type_group ASC, relation_type ASC, to_type ASC, to_id DESC);
-- Reverse index: partitioned by the target entity (to_id, to_type) so
-- incoming relations can be queried without a full scan.
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.reverse_relation AS
SELECT *
from thingsboard.relation
WHERE from_id IS NOT NULL AND from_type IS NOT NULL AND relation_type_group IS NOT NULL AND relation_type IS NOT NULL AND to_id IS NOT NULL AND to_type IS NOT NULL
PRIMARY KEY ((to_id, to_type), relation_type_group, relation_type, from_id, from_type)
WITH CLUSTERING ORDER BY ( relation_type_group ASC, relation_type ASC, from_id ASC, from_type ASC);

17
application/src/main/data/upgrade/1.3.1/schema_update.sql

@@ -1,17 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Allow latest-telemetry string values up to 10,000,000 characters.
ALTER TABLE ts_kv_latest ALTER COLUMN str_v SET DATA TYPE varchar(10000000);

112
application/src/main/data/upgrade/1.4.0/schema_update.cql

@@ -1,112 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Audit log, denormalized into one table per query path (Cassandra models
-- queries, not entities): by entity, by customer, by user, and by tenant.
-- All four tables carry the same columns; only the partition key differs.
CREATE TABLE IF NOT EXISTS thingsboard.audit_log_by_entity_id (
tenant_id timeuuid,
id timeuuid,
customer_id timeuuid,
entity_id timeuuid,
entity_type text,
entity_name text,
user_id timeuuid,
user_name text,
action_type text,
action_data text,
action_status text,
action_failure_details text,
PRIMARY KEY ((tenant_id, entity_id, entity_type), id)
);
CREATE TABLE IF NOT EXISTS thingsboard.audit_log_by_customer_id (
tenant_id timeuuid,
id timeuuid,
customer_id timeuuid,
entity_id timeuuid,
entity_type text,
entity_name text,
user_id timeuuid,
user_name text,
action_type text,
action_data text,
action_status text,
action_failure_details text,
PRIMARY KEY ((tenant_id, customer_id), id)
);
CREATE TABLE IF NOT EXISTS thingsboard.audit_log_by_user_id (
tenant_id timeuuid,
id timeuuid,
customer_id timeuuid,
entity_id timeuuid,
entity_type text,
entity_name text,
user_id timeuuid,
user_name text,
action_type text,
action_data text,
action_status text,
action_failure_details text,
PRIMARY KEY ((tenant_id, user_id), id)
);
-- Tenant-wide log is additionally bucketed by a bigint "partition" column
-- (presumably a time bucket, to keep Cassandra partitions bounded — the
-- writer is not visible here). Bucket values are tracked in the
-- *_partitions companion table below.
CREATE TABLE IF NOT EXISTS thingsboard.audit_log_by_tenant_id (
tenant_id timeuuid,
id timeuuid,
partition bigint,
customer_id timeuuid,
entity_id timeuuid,
entity_type text,
entity_name text,
user_id timeuuid,
user_name text,
action_type text,
action_data text,
action_status text,
action_failure_details text,
PRIMARY KEY ((tenant_id, partition), id)
);
CREATE TABLE IF NOT EXISTS thingsboard.audit_log_by_tenant_id_partitions (
tenant_id timeuuid,
partition bigint,
PRIMARY KEY (( tenant_id ), partition)
) WITH CLUSTERING ORDER BY ( partition ASC )
AND compaction = { 'class' : 'LeveledCompactionStrategy' };
-- Dashboard is rebuilt (existing rows and views are discarded); the new
-- definition stores customer assignments serialized in assigned_customers.
DROP MATERIALIZED VIEW IF EXISTS thingsboard.dashboard_by_tenant_and_search_text;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.dashboard_by_customer_and_search_text;
DROP TABLE IF EXISTS thingsboard.dashboard;
CREATE TABLE IF NOT EXISTS thingsboard.dashboard (
id timeuuid,
tenant_id timeuuid,
title text,
search_text text,
assigned_customers text,
configuration text,
PRIMARY KEY (id, tenant_id)
);
-- Tenant-scoped text search over dashboards, newest first per search key.
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.dashboard_by_tenant_and_search_text AS
SELECT *
from thingsboard.dashboard
WHERE tenant_id IS NOT NULL AND search_text IS NOT NULL AND id IS NOT NULL
PRIMARY KEY ( tenant_id, search_text, id )
WITH CLUSTERING ORDER BY ( search_text ASC, id DESC );

41
application/src/main/data/upgrade/1.4.0/schema_update.sql

@@ -1,41 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- SQL counterpart of the 1.4.0 changes: a single audit_log table is enough
-- here because a relational database can index any column.
CREATE TABLE IF NOT EXISTS audit_log (
id varchar(31) NOT NULL CONSTRAINT audit_log_pkey PRIMARY KEY,
tenant_id varchar(31),
customer_id varchar(31),
entity_id varchar(31),
entity_type varchar(255),
entity_name varchar(255),
user_id varchar(31),
user_name varchar(255),
action_type varchar(255),
action_data varchar(1000000),
action_status varchar(255),
action_failure_details varchar(1000000)
);
-- The DROP discards existing dashboards; the new definition adds the
-- serialized assigned_customers column.
DROP TABLE IF EXISTS dashboard;
CREATE TABLE IF NOT EXISTS dashboard (
id varchar(31) NOT NULL CONSTRAINT dashboard_pkey PRIMARY KEY,
configuration varchar(10000000),
assigned_customers varchar(1000000),
search_text varchar(255),
tenant_id varchar(31),
title varchar(255)
);

103
application/src/main/data/upgrade/2.0.0/schema_update.cql

@@ -1,103 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- 2.0 message-queue tables. Partitions are bounded by
-- (node, cluster partition, time partition); all three use
-- DateTieredCompactionStrategy with aggressive tombstone settings.
-- NOTE(review): DTCS was deprecated in later Cassandra releases in favour
-- of TWCS — fine for the 2.0-era target version, verify before reuse.
CREATE TABLE IF NOT EXISTS thingsboard.msg_queue (
node_id timeuuid,
cluster_partition bigint,
ts_partition bigint,
ts bigint,
msg blob,
PRIMARY KEY ((node_id, cluster_partition, ts_partition), ts))
WITH CLUSTERING ORDER BY (ts DESC)
AND compaction = {
'class': 'org.apache.cassandra.db.compaction.DateTieredCompactionStrategy',
'min_threshold': '5',
'base_time_seconds': '43200',
'max_window_size_seconds': '43200',
'tombstone_threshold': '0.9',
'unchecked_tombstone_compaction': 'true'
};
-- Acknowledged message ids, mirroring msg_queue's partitioning.
CREATE TABLE IF NOT EXISTS thingsboard.msg_ack_queue (
node_id timeuuid,
cluster_partition bigint,
ts_partition bigint,
msg_id timeuuid,
PRIMARY KEY ((node_id, cluster_partition, ts_partition), msg_id))
WITH CLUSTERING ORDER BY (msg_id DESC)
AND compaction = {
'class': 'org.apache.cassandra.db.compaction.DateTieredCompactionStrategy',
'min_threshold': '5',
'base_time_seconds': '43200',
'max_window_size_seconds': '43200',
'tombstone_threshold': '0.9',
'unchecked_tombstone_compaction': 'true'
};
-- Time partitions already fully processed per (node, cluster partition).
CREATE TABLE IF NOT EXISTS thingsboard.processed_msg_partitions (
node_id timeuuid,
cluster_partition bigint,
ts_partition bigint,
PRIMARY KEY ((node_id, cluster_partition), ts_partition))
WITH CLUSTERING ORDER BY (ts_partition DESC)
AND compaction = {
'class': 'org.apache.cassandra.db.compaction.DateTieredCompactionStrategy',
'min_threshold': '5',
'base_time_seconds': '43200',
'max_window_size_seconds': '43200',
'tombstone_threshold': '0.9',
'unchecked_tombstone_compaction': 'true'
};
-- 2.0 rule engine: rule chains and their nodes replace rules/plugins.
CREATE TABLE IF NOT EXISTS thingsboard.rule_chain (
id uuid,
tenant_id uuid,
name text,
search_text text,
first_rule_node_id uuid,
root boolean,
debug_mode boolean,
configuration text,
additional_info text,
PRIMARY KEY (id, tenant_id)
);
-- Tenant-scoped text search over rule chains, newest first per search key.
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.rule_chain_by_tenant_and_search_text AS
SELECT *
from thingsboard.rule_chain
WHERE tenant_id IS NOT NULL AND search_text IS NOT NULL AND id IS NOT NULL
PRIMARY KEY ( tenant_id, search_text, id )
WITH CLUSTERING ORDER BY ( search_text ASC, id DESC );
CREATE TABLE IF NOT EXISTS thingsboard.rule_node (
id uuid,
rule_chain_id uuid,
type text,
name text,
debug_mode boolean,
search_text text,
configuration text,
additional_info text,
PRIMARY KEY (id)
);
-- The pre-2.0 rule/plugin tables and their views are removed for good.
DROP MATERIALIZED VIEW IF EXISTS thingsboard.rule_by_plugin_token;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.rule_by_tenant_and_search_text;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.plugin_by_api_token;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.plugin_by_tenant_and_search_text;
DROP TABLE IF EXISTS thingsboard.rule;
DROP TABLE IF EXISTS thingsboard.plugin;

44
application/src/main/data/upgrade/2.0.0/schema_update.sql

@@ -1,44 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- 2.0 rule engine (SQL): rule chains and their nodes replace rules/plugins.
CREATE TABLE IF NOT EXISTS rule_chain (
id varchar(31) NOT NULL CONSTRAINT rule_chain_pkey PRIMARY KEY,
additional_info varchar,
configuration varchar(10000000),
name varchar(255),
first_rule_node_id varchar(31),
root boolean,
debug_mode boolean,
search_text varchar(255),
tenant_id varchar(31)
);
-- Rule nodes reference their owning chain via rule_chain_id.
CREATE TABLE IF NOT EXISTS rule_node (
id varchar(31) NOT NULL CONSTRAINT rule_node_pkey PRIMARY KEY,
rule_chain_id varchar(31),
additional_info varchar,
configuration varchar(10000000),
type varchar(255),
name varchar(255),
debug_mode boolean,
search_text varchar(255)
);
-- IF EXISTS keeps the script idempotent on re-run, matching the convention
-- every other upgrade script here follows (the original bare DROPs failed
-- when the tables were already gone).
DROP TABLE IF EXISTS rule;
DROP TABLE IF EXISTS plugin;
-- Entity-type ordinals 3 (RULE) and 4 (PLUGIN) were removed in 2.0: purge
-- their alarms, then shift the ordinals of the remaining types down by two.
DELETE FROM alarm WHERE originator_type = 3 OR originator_type = 4;
UPDATE alarm SET originator_type = (originator_type - 2) where originator_type > 2;

74
application/src/main/data/upgrade/2.1.1/schema_update.cql

@@ -1,74 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Entity views (2.1.1). The base table's primary key lists every lookup
-- column so each materialized view below can promote a different one into
-- its partition/clustering key.
CREATE TABLE IF NOT EXISTS thingsboard.entity_views (
id timeuuid,
entity_id timeuuid,
entity_type text,
tenant_id timeuuid,
customer_id timeuuid,
name text,
keys text,
start_ts bigint,
end_ts bigint,
search_text text,
additional_info text,
PRIMARY KEY (id, entity_id, tenant_id, customer_id)
);
-- Lookup by exact name within a tenant.
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.entity_view_by_tenant_and_name AS
SELECT *
from thingsboard.entity_views
WHERE tenant_id IS NOT NULL
AND entity_id IS NOT NULL
AND customer_id IS NOT NULL
AND name IS NOT NULL
AND id IS NOT NULL
PRIMARY KEY (tenant_id, name, id, customer_id, entity_id)
WITH CLUSTERING ORDER BY (name ASC, id DESC, customer_id DESC);
-- Tenant-scoped text search.
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.entity_view_by_tenant_and_search_text AS
SELECT *
from thingsboard.entity_views
WHERE tenant_id IS NOT NULL
AND entity_id IS NOT NULL
AND customer_id IS NOT NULL
AND search_text IS NOT NULL
AND id IS NOT NULL
PRIMARY KEY (tenant_id, search_text, id, customer_id, entity_id)
WITH CLUSTERING ORDER BY (search_text ASC, id DESC, customer_id DESC);
-- Views assigned to a given customer within a tenant.
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.entity_view_by_tenant_and_customer AS
SELECT *
from thingsboard.entity_views
WHERE tenant_id IS NOT NULL
AND customer_id IS NOT NULL
AND entity_id IS NOT NULL
AND search_text IS NOT NULL
AND id IS NOT NULL
PRIMARY KEY (tenant_id, customer_id, search_text, id, entity_id)
WITH CLUSTERING ORDER BY (customer_id DESC, search_text ASC, id DESC);
-- Views built on top of a given target entity within a tenant.
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.entity_view_by_tenant_and_entity_id AS
SELECT *
from thingsboard.entity_views
WHERE tenant_id IS NOT NULL
AND customer_id IS NOT NULL
AND entity_id IS NOT NULL
AND search_text IS NOT NULL
AND id IS NOT NULL
PRIMARY KEY (tenant_id, entity_id, customer_id, search_text, id)
WITH CLUSTERING ORDER BY (entity_id DESC, customer_id DESC, search_text ASC, id DESC);

29
application/src/main/data/upgrade/2.1.1/schema_update.sql

@@ -1,29 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- SQL counterpart of the 2.1.1 entity-views table (one table is enough:
-- relational databases can index any column).
CREATE TABLE IF NOT EXISTS entity_views (
id varchar(31) NOT NULL CONSTRAINT entity_views_pkey PRIMARY KEY,
entity_id varchar(31),
entity_type varchar(255),
tenant_id varchar(31),
customer_id varchar(31),
name varchar(255),
keys varchar(255),
start_ts bigint,
end_ts bigint,
search_text varchar(255),
additional_info varchar
);

110
application/src/main/data/upgrade/2.1.2/schema_update.cql

@@ -1,110 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- 2.1.2 replaces entity_views with the singular entity_view and adds a
-- 'type' column. Old views are dropped before the base table; existing
-- entity-view rows are discarded.
DROP MATERIALIZED VIEW IF EXISTS thingsboard.entity_view_by_tenant_and_name;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.entity_view_by_tenant_and_search_text;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.entity_view_by_tenant_and_customer;
DROP MATERIALIZED VIEW IF EXISTS thingsboard.entity_view_by_tenant_and_entity_id;
DROP TABLE IF EXISTS thingsboard.entity_views;
CREATE TABLE IF NOT EXISTS thingsboard.entity_view (
id timeuuid,
entity_id timeuuid,
entity_type text,
tenant_id timeuuid,
customer_id timeuuid,
name text,
type text,
keys text,
start_ts bigint,
end_ts bigint,
search_text text,
additional_info text,
PRIMARY KEY (id, entity_id, tenant_id, customer_id, type)
);
-- Lookup by exact name within a tenant.
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.entity_view_by_tenant_and_name AS
SELECT *
from thingsboard.entity_view
WHERE tenant_id IS NOT NULL
AND entity_id IS NOT NULL
AND customer_id IS NOT NULL
AND type IS NOT NULL
AND name IS NOT NULL
AND id IS NOT NULL
PRIMARY KEY (tenant_id, name, id, customer_id, entity_id, type)
WITH CLUSTERING ORDER BY (name ASC, id DESC, customer_id DESC);
-- Tenant-scoped text search (all types).
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.entity_view_by_tenant_and_search_text AS
SELECT *
from thingsboard.entity_view
WHERE tenant_id IS NOT NULL
AND entity_id IS NOT NULL
AND customer_id IS NOT NULL
AND type IS NOT NULL
AND search_text IS NOT NULL
AND id IS NOT NULL
PRIMARY KEY (tenant_id, search_text, id, customer_id, entity_id, type)
WITH CLUSTERING ORDER BY (search_text ASC, id DESC, customer_id DESC);
-- Tenant-scoped text search filtered by view type.
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.entity_view_by_tenant_by_type_and_search_text AS
SELECT *
from thingsboard.entity_view
WHERE tenant_id IS NOT NULL
AND entity_id IS NOT NULL
AND customer_id IS NOT NULL
AND type IS NOT NULL
AND search_text IS NOT NULL
AND id IS NOT NULL
PRIMARY KEY (tenant_id, type, search_text, id, customer_id, entity_id)
WITH CLUSTERING ORDER BY (type ASC, search_text ASC, id DESC, customer_id DESC);
-- Views assigned to a given customer (all types).
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.entity_view_by_tenant_and_customer AS
SELECT *
from thingsboard.entity_view
WHERE tenant_id IS NOT NULL
AND customer_id IS NOT NULL
AND entity_id IS NOT NULL
AND type IS NOT NULL
AND search_text IS NOT NULL
AND id IS NOT NULL
PRIMARY KEY (tenant_id, customer_id, search_text, id, entity_id, type)
WITH CLUSTERING ORDER BY (customer_id DESC, search_text ASC, id DESC);
-- Views assigned to a given customer, filtered by view type.
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.entity_view_by_tenant_and_customer_and_type AS
SELECT *
from thingsboard.entity_view
WHERE tenant_id IS NOT NULL
AND customer_id IS NOT NULL
AND entity_id IS NOT NULL
AND type IS NOT NULL
AND search_text IS NOT NULL
AND id IS NOT NULL
PRIMARY KEY (tenant_id, type, customer_id, search_text, id, entity_id)
WITH CLUSTERING ORDER BY (type ASC, customer_id DESC, search_text ASC, id DESC);
-- Views built on top of a given target entity.
CREATE MATERIALIZED VIEW IF NOT EXISTS thingsboard.entity_view_by_tenant_and_entity_id AS
SELECT *
from thingsboard.entity_view
WHERE tenant_id IS NOT NULL
AND customer_id IS NOT NULL
AND entity_id IS NOT NULL
AND type IS NOT NULL
AND search_text IS NOT NULL
AND id IS NOT NULL
PRIMARY KEY (tenant_id, entity_id, customer_id, search_text, id, type)
WITH CLUSTERING ORDER BY (entity_id DESC, customer_id DESC, search_text ASC, id DESC);

32
application/src/main/data/upgrade/2.1.2/schema_update.sql

@@ -1,32 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- SQL counterpart of the 2.1.2 rename: entity_views is replaced by
-- entity_view with a new 'type' column; existing rows are discarded.
DROP TABLE IF EXISTS entity_views;
CREATE TABLE IF NOT EXISTS entity_view (
id varchar(31) NOT NULL CONSTRAINT entity_view_pkey PRIMARY KEY,
entity_id varchar(31),
entity_type varchar(255),
tenant_id varchar(31),
customer_id varchar(31),
type varchar(255),
name varchar(255),
keys varchar(255),
start_ts bigint,
end_ts bigint,
search_text varchar(255),
additional_info varchar
);

19
application/src/main/data/upgrade/2.2.0/schema_update.sql

@@ -1,19 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Enforce unique component class names; widen entity_view.keys to
-- 10,000,000 characters.
ALTER TABLE component_descriptor ADD UNIQUE (clazz);
ALTER TABLE entity_view ALTER COLUMN keys SET DATA TYPE varchar(10000000);

17
application/src/main/data/upgrade/2.3.1/schema_update.sql

@@ -1,17 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Allow event bodies up to 10,000,000 characters.
ALTER TABLE event ALTER COLUMN body SET DATA TYPE varchar(10000000);

23
application/src/main/data/upgrade/2.4.0/schema_update.sql

@@ -1,23 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Composite indexes for the common alarm, event, and relation lookups.
CREATE INDEX IF NOT EXISTS idx_alarm_originator_alarm_type ON alarm(tenant_id, type, originator_type, originator_id);
CREATE INDEX IF NOT EXISTS idx_event_type_entity_id ON event(tenant_id, event_type, entity_type, entity_id);
CREATE INDEX IF NOT EXISTS idx_relation_to_id ON relation(relation_type_group, to_type, to_id);
CREATE INDEX IF NOT EXISTS idx_relation_from_id ON relation(relation_type_group, from_type, from_id);

31
application/src/main/data/upgrade/2.4.2/schema_update.sql

@@ -1,31 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Replace the 2.4.0 alarm index with one keyed by originator and ordered
-- by start_ts DESC (newest alarms first).
DROP INDEX IF EXISTS idx_alarm_originator_alarm_type;
CREATE INDEX IF NOT EXISTS idx_alarm_originator_alarm_type ON alarm(originator_id, type, start_ts DESC);
-- Customer/type indexes backing device and asset list queries.
CREATE INDEX IF NOT EXISTS idx_device_customer_id ON device(tenant_id, customer_id);
CREATE INDEX IF NOT EXISTS idx_device_customer_id_and_type ON device(tenant_id, customer_id, type);
CREATE INDEX IF NOT EXISTS idx_device_type ON device(tenant_id, type);
CREATE INDEX IF NOT EXISTS idx_asset_customer_id ON asset(tenant_id, customer_id);
CREATE INDEX IF NOT EXISTS idx_asset_customer_id_and_type ON asset(tenant_id, customer_id, type);
CREATE INDEX IF NOT EXISTS idx_asset_type ON asset(tenant_id, type);

209
application/src/main/data/upgrade/2.4.3/schema_update_psql_drop_partitions.sql

@@ -1,209 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Drops ts_kv_* partition tables that lie entirely before the retention
-- cutoff. The cutoff is now() minus the LARGEST TTL seen anywhere: the
-- system_ttl argument, any tenant's 'TTL' attribute, or any customer's
-- 'TTL' attribute — so nothing still retained by anyone is dropped.
-- 'deleted' is incremented once per table dropped.
-- Partition tables are expected to be named ts_kv_<yyyy>[_<MM>[_<dd>]]
-- according to partition_type ('YEARS' | 'MONTHS' | 'DAYS').
CREATE OR REPLACE PROCEDURE drop_partitions_by_max_ttl(IN partition_type varchar, IN system_ttl bigint, INOUT deleted bigint)
LANGUAGE plpgsql AS
$$
DECLARE
max_tenant_ttl bigint;
max_customer_ttl bigint;
max_ttl bigint;
date timestamp;
partition_by_max_ttl_date varchar;
partition_by_max_ttl_month varchar;
partition_by_max_ttl_day varchar;
partition_by_max_ttl_year varchar;
partition varchar;
partition_year integer;
partition_month integer;
partition_day integer;
BEGIN
-- Largest per-tenant TTL attribute.
SELECT max(attribute_kv.long_v)
FROM tenant
INNER JOIN attribute_kv ON tenant.id = attribute_kv.entity_id
WHERE attribute_kv.attribute_key = 'TTL'
into max_tenant_ttl;
-- Largest per-customer TTL attribute.
SELECT max(attribute_kv.long_v)
FROM customer
INNER JOIN attribute_kv ON customer.id = attribute_kv.entity_id
WHERE attribute_kv.attribute_key = 'TTL'
into max_customer_ttl;
max_ttl := GREATEST(system_ttl, max_customer_ttl, max_tenant_ttl);
if max_ttl IS NOT NULL AND max_ttl > 0 THEN
-- The partition containing the cutoff instant is itself kept; only
-- partitions strictly older than it are candidates for dropping.
date := to_timestamp(EXTRACT(EPOCH FROM current_timestamp) - max_ttl);
partition_by_max_ttl_date := get_partition_by_max_ttl_date(partition_type, date);
RAISE NOTICE 'Date by max ttl: %', date;
RAISE NOTICE 'Partition by max ttl: %', partition_by_max_ttl_date;
IF partition_by_max_ttl_date IS NOT NULL THEN
-- Split the cutoff partition name ts_kv_<yyyy>[_<MM>[_<dd>]] into
-- its year/month/day components (fields 3, 4, 5 after '_' split).
CASE
WHEN partition_type = 'DAYS' THEN
partition_by_max_ttl_year := SPLIT_PART(partition_by_max_ttl_date, '_', 3);
partition_by_max_ttl_month := SPLIT_PART(partition_by_max_ttl_date, '_', 4);
partition_by_max_ttl_day := SPLIT_PART(partition_by_max_ttl_date, '_', 5);
WHEN partition_type = 'MONTHS' THEN
partition_by_max_ttl_year := SPLIT_PART(partition_by_max_ttl_date, '_', 3);
partition_by_max_ttl_month := SPLIT_PART(partition_by_max_ttl_date, '_', 4);
ELSE
partition_by_max_ttl_year := SPLIT_PART(partition_by_max_ttl_date, '_', 3);
END CASE;
IF partition_by_max_ttl_year IS NULL THEN
RAISE NOTICE 'Failed to remove partitions by max ttl date due to partition_by_max_ttl_year is null!';
ELSE
-- YEARS: drop any yearly partition older than the cutoff year.
IF partition_type = 'YEARS' THEN
FOR partition IN SELECT tablename
FROM pg_tables
WHERE schemaname = 'public'
AND tablename like 'ts_kv_' || '%'
AND tablename != 'ts_kv_latest'
AND tablename != 'ts_kv_dictionary'
AND tablename != 'ts_kv_indefinite'
AND tablename != partition_by_max_ttl_date
LOOP
partition_year := SPLIT_PART(partition, '_', 3)::integer;
IF partition_year < partition_by_max_ttl_year::integer THEN
RAISE NOTICE 'Partition to delete by max ttl: %', partition;
EXECUTE format('DROP TABLE IF EXISTS %I', partition);
deleted := deleted + 1;
END IF;
END LOOP;
ELSE
-- MONTHS: compare (year, month) against the cutoff; earlier
-- years drop outright, the cutoff year drops earlier months only.
IF partition_type = 'MONTHS' THEN
IF partition_by_max_ttl_month IS NULL THEN
RAISE NOTICE 'Failed to remove months partitions by max ttl date due to partition_by_max_ttl_month is null!';
ELSE
FOR partition IN SELECT tablename
FROM pg_tables
WHERE schemaname = 'public'
AND tablename like 'ts_kv_' || '%'
AND tablename != 'ts_kv_latest'
AND tablename != 'ts_kv_dictionary'
AND tablename != 'ts_kv_indefinite'
AND tablename != partition_by_max_ttl_date
LOOP
partition_year := SPLIT_PART(partition, '_', 3)::integer;
IF partition_year > partition_by_max_ttl_year::integer THEN
RAISE NOTICE 'Skip iteration! Partition: % is valid!', partition;
CONTINUE;
ELSE
IF partition_year < partition_by_max_ttl_year::integer THEN
RAISE NOTICE 'Partition to delete by max ttl: %', partition;
EXECUTE format('DROP TABLE IF EXISTS %I', partition);
deleted := deleted + 1;
ELSE
partition_month := SPLIT_PART(partition, '_', 4)::integer;
IF partition_year = partition_by_max_ttl_year::integer THEN
IF partition_month >= partition_by_max_ttl_month::integer THEN
RAISE NOTICE 'Skip iteration! Partition: % is valid!', partition;
CONTINUE;
ELSE
RAISE NOTICE 'Partition to delete by max ttl: %', partition;
EXECUTE format('DROP TABLE IF EXISTS %I', partition);
deleted := deleted + 1;
END IF;
END IF;
END IF;
END IF;
END LOOP;
END IF;
ELSE
-- DAYS: same scheme extended one level — compare (year, month,
-- day) against the cutoff, dropping only strictly older days.
IF partition_type = 'DAYS' THEN
IF partition_by_max_ttl_month IS NULL THEN
RAISE NOTICE 'Failed to remove days partitions by max ttl date due to partition_by_max_ttl_month is null!';
ELSE
IF partition_by_max_ttl_day IS NULL THEN
RAISE NOTICE 'Failed to remove days partitions by max ttl date due to partition_by_max_ttl_day is null!';
ELSE
FOR partition IN SELECT tablename
FROM pg_tables
WHERE schemaname = 'public'
AND tablename like 'ts_kv_' || '%'
AND tablename != 'ts_kv_latest'
AND tablename != 'ts_kv_dictionary'
AND tablename != 'ts_kv_indefinite'
AND tablename != partition_by_max_ttl_date
LOOP
partition_year := SPLIT_PART(partition, '_', 3)::integer;
IF partition_year > partition_by_max_ttl_year::integer THEN
RAISE NOTICE 'Skip iteration! Partition: % is valid!', partition;
CONTINUE;
ELSE
IF partition_year < partition_by_max_ttl_year::integer THEN
RAISE NOTICE 'Partition to delete by max ttl: %', partition;
EXECUTE format('DROP TABLE IF EXISTS %I', partition);
deleted := deleted + 1;
ELSE
partition_month := SPLIT_PART(partition, '_', 4)::integer;
IF partition_month > partition_by_max_ttl_month::integer THEN
RAISE NOTICE 'Skip iteration! Partition: % is valid!', partition;
CONTINUE;
ELSE
IF partition_month < partition_by_max_ttl_month::integer THEN
RAISE NOTICE 'Partition to delete by max ttl: %', partition;
EXECUTE format('DROP TABLE IF EXISTS %I', partition);
deleted := deleted + 1;
ELSE
partition_day := SPLIT_PART(partition, '_', 5)::integer;
IF partition_day >= partition_by_max_ttl_day::integer THEN
RAISE NOTICE 'Skip iteration! Partition: % is valid!', partition;
CONTINUE;
ELSE
IF partition_day < partition_by_max_ttl_day::integer THEN
RAISE NOTICE 'Partition to delete by max ttl: %', partition;
EXECUTE format('DROP TABLE IF EXISTS %I', partition);
deleted := deleted + 1;
END IF;
END IF;
END IF;
END IF;
END IF;
END IF;
END LOOP;
END IF;
END IF;
END IF;
END IF;
END IF;
END IF;
END IF;
END IF;
END
$$;
-- Maps a cutoff timestamp to the name of the ts_kv partition table that
-- would contain it: ts_kv_<yyyy>[_<MM>[_<dd>]] depending on partition_type
-- ('YEARS' | 'MONTHS' | 'DAYS'). Returns NULL when partition_type is
-- unrecognized or when no table with that name exists in the public schema.
-- Fix: the NOTICE message read 'Failed to found partition by ttl'.
CREATE OR REPLACE FUNCTION get_partition_by_max_ttl_date(IN partition_type varchar, IN date timestamp, OUT partition varchar) AS
$$
BEGIN
CASE
WHEN partition_type = 'DAYS' THEN
partition := 'ts_kv_' || to_char(date, 'yyyy') || '_' || to_char(date, 'MM') || '_' || to_char(date, 'dd');
WHEN partition_type = 'MONTHS' THEN
partition := 'ts_kv_' || to_char(date, 'yyyy') || '_' || to_char(date, 'MM');
WHEN partition_type = 'YEARS' THEN
partition := 'ts_kv_' || to_char(date, 'yyyy');
ELSE
partition := NULL;
END CASE;
IF partition IS NOT NULL THEN
-- Only report a partition that actually exists as a table.
IF NOT EXISTS(SELECT
FROM pg_tables
WHERE schemaname = 'public'
AND tablename = partition) THEN
partition := NULL;
RAISE NOTICE 'Failed to find partition by ttl';
END IF;
END IF;
END;
$$ LANGUAGE plpgsql;

359
application/src/main/data/upgrade/2.4.3/schema_update_psql_ts.sql

@ -1,359 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- call create_partition_ts_kv_table();
-- Converts the legacy monolithic ts_kv table into a range-partitioned table
-- keyed by (entity_id uuid, key integer, ts). The legacy data is preserved in
-- ts_kv_old; a DEFAULT partition (ts_kv_indefinite) catches rows that fall
-- outside every explicit range. Statement order matters: constraints are
-- normalized before the rename so the old pkey can be renamed deterministically.
CREATE OR REPLACE PROCEDURE create_partition_ts_kv_table()
LANGUAGE plpgsql AS
$$
BEGIN
-- Normalize constraints on the legacy table before renaming it.
ALTER TABLE ts_kv
DROP CONSTRAINT IF EXISTS ts_kv_unq_key;
ALTER TABLE ts_kv
DROP CONSTRAINT IF EXISTS ts_kv_pkey;
ALTER TABLE ts_kv
ADD CONSTRAINT ts_kv_pkey PRIMARY KEY (entity_type, entity_id, key, ts);
ALTER TABLE ts_kv
RENAME TO ts_kv_old;
ALTER TABLE ts_kv_old
RENAME CONSTRAINT ts_kv_pkey TO ts_kv_pkey_old;
-- New ts_kv: same columns as the legacy table, but partitioned by ts range.
CREATE TABLE IF NOT EXISTS ts_kv
(
LIKE ts_kv_old
)
PARTITION BY RANGE (ts);
-- entity_type is dropped; entity_id becomes uuid and key becomes the integer
-- id later populated from ts_kv_dictionary.
ALTER TABLE ts_kv
DROP COLUMN entity_type;
ALTER TABLE ts_kv
ALTER COLUMN entity_id TYPE uuid USING entity_id::uuid;
ALTER TABLE ts_kv
ALTER COLUMN key TYPE integer USING key::integer;
ALTER TABLE ts_kv
ADD CONSTRAINT ts_kv_pkey PRIMARY KEY (entity_id, key, ts);
CREATE TABLE IF NOT EXISTS ts_kv_indefinite PARTITION OF ts_kv DEFAULT;
END;
$$;
-- call create_new_ts_kv_latest_table();
-- Rebuilds ts_kv_latest in the new format (uuid entity_id, integer key) and
-- renames the legacy table to ts_kv_latest_old. Re-runnable: when
-- ts_kv_latest_old already exists (a previous partial run), it only creates
-- the new-format table directly if it is still missing.
CREATE OR REPLACE PROCEDURE create_new_ts_kv_latest_table()
LANGUAGE plpgsql AS
$$
BEGIN
IF NOT EXISTS(SELECT FROM pg_tables WHERE schemaname = 'public' AND tablename = 'ts_kv_latest_old') THEN
-- First run: normalize constraints, rename the legacy table, then convert
-- column types on the fresh copy.
ALTER TABLE ts_kv_latest
DROP CONSTRAINT IF EXISTS ts_kv_latest_unq_key;
ALTER TABLE ts_kv_latest
DROP CONSTRAINT IF EXISTS ts_kv_latest_pkey;
ALTER TABLE ts_kv_latest
ADD CONSTRAINT ts_kv_latest_pkey PRIMARY KEY (entity_type, entity_id, key);
ALTER TABLE ts_kv_latest
RENAME TO ts_kv_latest_old;
ALTER TABLE ts_kv_latest_old
RENAME CONSTRAINT ts_kv_latest_pkey TO ts_kv_latest_pkey_old;
CREATE TABLE IF NOT EXISTS ts_kv_latest
(
LIKE ts_kv_latest_old
);
ALTER TABLE ts_kv_latest
DROP COLUMN entity_type;
ALTER TABLE ts_kv_latest
ALTER COLUMN entity_id TYPE uuid USING entity_id::uuid;
ALTER TABLE ts_kv_latest
ALTER COLUMN key TYPE integer USING key::integer;
ALTER TABLE ts_kv_latest
ADD CONSTRAINT ts_kv_latest_pkey PRIMARY KEY (entity_id, key);
ELSE
RAISE NOTICE 'ts_kv_latest_old table already exists!';
-- Re-run after a partial upgrade: create the new-format table from scratch.
IF NOT EXISTS(SELECT FROM pg_tables WHERE schemaname = 'public' AND tablename = 'ts_kv_latest') THEN
CREATE TABLE IF NOT EXISTS ts_kv_latest
(
entity_id uuid NOT NULL,
key int NOT NULL,
ts bigint NOT NULL,
bool_v boolean,
str_v varchar(10000000),
long_v bigint,
dbl_v double precision,
json_v json,
CONSTRAINT ts_kv_latest_pkey PRIMARY KEY (entity_id, key)
);
END IF;
END IF;
END;
$$;
-- Derives the set of partitions needed to hold every timestamp present in
-- ts_kv_old. For each distinct day / month / year (depending on
-- partition_type) it yields the partition-name date suffix plus the
-- [from_ts, to_ts) bounds in epoch milliseconds. Raises for unknown types.
CREATE OR REPLACE FUNCTION get_partitions_data(IN partition_type varchar)
RETURNS
TABLE
(
partition_date text,
from_ts bigint,
to_ts bigint
)
AS
$$
BEGIN
CASE
WHEN partition_type = 'DAYS' THEN
RETURN QUERY SELECT day_date.day AS partition_date,
(extract(epoch from (day_date.day)::timestamp) * 1000)::bigint AS from_ts,
(extract(epoch from (day_date.day::date + INTERVAL '1 DAY')::timestamp) *
1000)::bigint AS to_ts
FROM (SELECT DISTINCT TO_CHAR(TO_TIMESTAMP(ts / 1000), 'YYYY_MM_DD') AS day
FROM ts_kv_old) AS day_date;
WHEN partition_type = 'MONTHS' THEN
RETURN QUERY SELECT SUBSTRING(month_date.first_date, 1, 7) AS partition_date,
(extract(epoch from (month_date.first_date)::timestamp) * 1000)::bigint AS from_ts,
(extract(epoch from (month_date.first_date::date + INTERVAL '1 MONTH')::timestamp) *
1000)::bigint AS to_ts
FROM (SELECT DISTINCT TO_CHAR(TO_TIMESTAMP(ts / 1000), 'YYYY_MM_01') AS first_date
FROM ts_kv_old) AS month_date;
WHEN partition_type = 'YEARS' THEN
RETURN QUERY SELECT SUBSTRING(year_date.year, 1, 4) AS partition_date,
(extract(epoch from (year_date.year)::timestamp) * 1000)::bigint AS from_ts,
(extract(epoch from (year_date.year::date + INTERVAL '1 YEAR')::timestamp) *
1000)::bigint AS to_ts
FROM (SELECT DISTINCT TO_CHAR(TO_TIMESTAMP(ts / 1000), 'YYYY_01_01') AS year
FROM ts_kv_old) AS year_date;
ELSE
-- Unknown partitioning property: abort the migration loudly.
RAISE EXCEPTION 'Failed to parse partitioning property: % !', partition_type;
END CASE;
END;
$$ LANGUAGE plpgsql;
-- call create_partitions();
-- Creates one ts_kv range partition per row returned by get_partitions_data.
-- Idempotent via IF NOT EXISTS. The dynamic SQL concatenates only values
-- produced by get_partitions_data (to_char output and bigints), not user input.
CREATE OR REPLACE PROCEDURE create_partitions(IN partition_type varchar)
LANGUAGE plpgsql AS
$$
DECLARE
partition_date varchar;
from_ts bigint;
to_ts bigint;
partitions_cursor CURSOR FOR SELECT *
FROM get_partitions_data(partition_type);
BEGIN
OPEN partitions_cursor;
LOOP
FETCH partitions_cursor INTO partition_date, from_ts, to_ts;
EXIT WHEN NOT FOUND;
EXECUTE 'CREATE TABLE IF NOT EXISTS ts_kv_' || partition_date ||
' PARTITION OF ts_kv FOR VALUES FROM (' || from_ts ||
') TO (' || to_ts || ');';
RAISE NOTICE 'A partition % has been created!',CONCAT('ts_kv_', partition_date);
END LOOP;
CLOSE partitions_cursor;
END;
$$;
-- call create_ts_kv_dictionary_table();
-- Creates the key-name -> integer-id lookup table used to shrink ts_kv keys.
CREATE OR REPLACE PROCEDURE create_ts_kv_dictionary_table()
    LANGUAGE plpgsql
AS $$
BEGIN
    -- Idempotent: IF NOT EXISTS keeps reruns of the upgrade safe.
    CREATE TABLE IF NOT EXISTS ts_kv_dictionary
    (
        key    varchar(255) NOT NULL,
        key_id serial UNIQUE,
        CONSTRAINT ts_key_id_pkey PRIMARY KEY (key)
    );
END;
$$;
-- call insert_into_dictionary();
-- Registers every distinct telemetry key found in ts_kv_old in
-- ts_kv_dictionary, letting the serial key_id column assign integer ids.
-- Keys already present are skipped (re-runnable).
CREATE OR REPLACE PROCEDURE insert_into_dictionary()
LANGUAGE plpgsql AS
$$
DECLARE
insert_record RECORD;
key_cursor CURSOR FOR SELECT DISTINCT key
FROM ts_kv_old
ORDER BY key;
BEGIN
OPEN key_cursor;
LOOP
FETCH key_cursor INTO insert_record;
EXIT WHEN NOT FOUND;
IF NOT EXISTS(SELECT key FROM ts_kv_dictionary WHERE key = insert_record.key) THEN
INSERT INTO ts_kv_dictionary(key) VALUES (insert_record.key);
RAISE NOTICE 'Key: % has been inserted into the dictionary!',insert_record.key;
ELSE
RAISE NOTICE 'Key: % already exists in the dictionary!',insert_record.key;
END IF;
END LOOP;
CLOSE key_cursor;
END;
$$;
-- Converts ThingsBoard's legacy hex entity-id string into a standard UUID by
-- reordering its character groups; the literal '-1' supplies the version
-- nibble. NOTE(review): the exact legacy layout is assumed from this
-- reordering -- confirm against the Java UUIDConverter before reuse.
CREATE OR REPLACE FUNCTION to_uuid(IN entity_id varchar, OUT uuid_id uuid) AS
$$
BEGIN
uuid_id := substring(entity_id, 8, 8) || '-' || substring(entity_id, 4, 4) || '-1' || substring(entity_id, 1, 3) ||
'-' || substring(entity_id, 16, 4) || '-' || substring(entity_id, 20, 12);
END;
$$ LANGUAGE plpgsql;
-- Bulk-migrates ts_kv_old into the new partitioned ts_kv via a server-side
-- file: first COPY the converted rows (uuid entity_id, dictionary integer key)
-- out to path_to_file, then COPY them back into ts_kv. path_to_file must be
-- writable/readable by the PostgreSQL server process; %L quotes it safely.
CREATE OR REPLACE PROCEDURE insert_into_ts_kv(IN path_to_file varchar)
LANGUAGE plpgsql AS
$$
BEGIN
-- Export: join the dictionary to replace the varchar key with its integer id.
EXECUTE format('COPY (SELECT to_uuid(entity_id) AS entity_id,
ts_kv_records.key AS key,
ts_kv_records.ts AS ts,
ts_kv_records.bool_v AS bool_v,
ts_kv_records.str_v AS str_v,
ts_kv_records.long_v AS long_v,
ts_kv_records.dbl_v AS dbl_v
FROM (SELECT entity_id AS entity_id,
key_id AS key,
ts,
bool_v,
str_v,
long_v,
dbl_v
FROM ts_kv_old
INNER JOIN ts_kv_dictionary ON (ts_kv_old.key = ts_kv_dictionary.key)) AS ts_kv_records) TO %L;',
path_to_file);
-- Import into the new partitioned table.
EXECUTE format('COPY ts_kv FROM %L', path_to_file);
-- Consistency fix: terminate END with ';' like the sibling procedures.
END;
$$;
-- call insert_into_ts_kv_latest();
-- Bulk-migrates ts_kv_latest_old into the new ts_kv_latest via a server-side
-- file: COPY the converted rows out to path_to_file, then COPY them back in.
-- path_to_file must be accessible to the PostgreSQL server process.
CREATE OR REPLACE PROCEDURE insert_into_ts_kv_latest(IN path_to_file varchar)
LANGUAGE plpgsql AS
$$
BEGIN
-- Export: entity_id converted to uuid, key replaced by its dictionary id.
EXECUTE format('COPY (SELECT to_uuid(entity_id) AS entity_id,
ts_kv_latest_records.key AS key,
ts_kv_latest_records.ts AS ts,
ts_kv_latest_records.bool_v AS bool_v,
ts_kv_latest_records.str_v AS str_v,
ts_kv_latest_records.long_v AS long_v,
ts_kv_latest_records.dbl_v AS dbl_v
FROM (SELECT entity_id AS entity_id,
key_id AS key,
ts,
bool_v,
str_v,
long_v,
dbl_v
FROM ts_kv_latest_old
INNER JOIN ts_kv_dictionary ON (ts_kv_latest_old.key = ts_kv_dictionary.key)) AS ts_kv_latest_records) TO %L;',
path_to_file);
-- Import into the new-format table.
EXECUTE format('COPY ts_kv_latest FROM %L', path_to_file);
END;
$$;
-- call insert_into_ts_kv_cursor();
-- Row-by-row alternative to insert_into_ts_kv for deployments where
-- server-side COPY to a file is not possible. Streams converted rows from
-- ts_kv_old into the partitioned ts_kv, reporting progress every insert_size
-- rows via NOTICE.
CREATE OR REPLACE PROCEDURE insert_into_ts_kv_cursor()
LANGUAGE plpgsql AS
$$
DECLARE
insert_size CONSTANT integer := 10000;
insert_counter integer DEFAULT 0;
insert_record RECORD;
insert_cursor CURSOR FOR SELECT to_uuid(entity_id) AS entity_id,
ts_kv_records.key AS key,
ts_kv_records.ts AS ts,
ts_kv_records.bool_v AS bool_v,
ts_kv_records.str_v AS str_v,
ts_kv_records.long_v AS long_v,
ts_kv_records.dbl_v AS dbl_v
FROM (SELECT entity_id AS entity_id,
key_id AS key,
ts,
bool_v,
str_v,
long_v,
dbl_v
FROM ts_kv_old
INNER JOIN ts_kv_dictionary ON (ts_kv_old.key = ts_kv_dictionary.key)) AS ts_kv_records;
BEGIN
OPEN insert_cursor;
LOOP
insert_counter := insert_counter + 1;
FETCH insert_cursor INTO insert_record;
IF NOT FOUND THEN
-- The counter was already incremented for the failed fetch, hence -1.
RAISE NOTICE '% records have been inserted into the partitioned ts_kv!',insert_counter - 1;
EXIT;
END IF;
INSERT INTO ts_kv(entity_id, key, ts, bool_v, str_v, long_v, dbl_v)
VALUES (insert_record.entity_id, insert_record.key, insert_record.ts, insert_record.bool_v, insert_record.str_v,
insert_record.long_v, insert_record.dbl_v);
IF MOD(insert_counter, insert_size) = 0 THEN
RAISE NOTICE '% records have been inserted into the partitioned ts_kv!',insert_counter;
END IF;
END LOOP;
CLOSE insert_cursor;
END;
$$;
-- call insert_into_ts_kv_latest_cursor();
-- Row-by-row alternative to insert_into_ts_kv_latest for deployments where
-- server-side COPY to a file is not possible. Streams converted rows from
-- ts_kv_latest_old into ts_kv_latest, reporting progress every insert_size
-- rows via NOTICE.
CREATE OR REPLACE PROCEDURE insert_into_ts_kv_latest_cursor()
LANGUAGE plpgsql AS
$$
DECLARE
insert_size CONSTANT integer := 10000;
insert_counter integer DEFAULT 0;
insert_record RECORD;
insert_cursor CURSOR FOR SELECT to_uuid(entity_id) AS entity_id,
ts_kv_latest_records.key AS key,
ts_kv_latest_records.ts AS ts,
ts_kv_latest_records.bool_v AS bool_v,
ts_kv_latest_records.str_v AS str_v,
ts_kv_latest_records.long_v AS long_v,
ts_kv_latest_records.dbl_v AS dbl_v
FROM (SELECT entity_id AS entity_id,
key_id AS key,
ts,
bool_v,
str_v,
long_v,
dbl_v
FROM ts_kv_latest_old
INNER JOIN ts_kv_dictionary ON (ts_kv_latest_old.key = ts_kv_dictionary.key)) AS ts_kv_latest_records;
BEGIN
OPEN insert_cursor;
LOOP
insert_counter := insert_counter + 1;
FETCH insert_cursor INTO insert_record;
IF NOT FOUND THEN
-- The counter was already incremented for the failed fetch, hence -1.
RAISE NOTICE '% records have been inserted into the ts_kv_latest!',insert_counter - 1;
EXIT;
END IF;
INSERT INTO ts_kv_latest(entity_id, key, ts, bool_v, str_v, long_v, dbl_v)
VALUES (insert_record.entity_id, insert_record.key, insert_record.ts, insert_record.bool_v, insert_record.str_v,
insert_record.long_v, insert_record.dbl_v);
IF MOD(insert_counter, insert_size) = 0 THEN
RAISE NOTICE '% records have been inserted into the ts_kv_latest!',insert_counter;
END IF;
END LOOP;
CLOSE insert_cursor;
END;
$$;

208
application/src/main/data/upgrade/2.4.3/schema_update_timescale_ts.sql

@ -1,208 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- call create_new_ts_kv_table();
-- Timescale variant: renames the legacy tenant_ts_kv to tenant_ts_kv_old and
-- builds the new ts_kv from it, converting entity_id to uuid and key to the
-- dictionary integer id, renaming old indexes out of the way, and dropping
-- the per-tenant column.
CREATE OR REPLACE PROCEDURE create_new_ts_kv_table() LANGUAGE plpgsql AS $$
BEGIN
ALTER TABLE tenant_ts_kv
RENAME TO tenant_ts_kv_old;
CREATE TABLE IF NOT EXISTS ts_kv
(
LIKE tenant_ts_kv_old
);
ALTER TABLE ts_kv ALTER COLUMN entity_id TYPE uuid USING entity_id::uuid;
ALTER TABLE ts_kv ALTER COLUMN key TYPE integer USING key::integer;
-- Rename legacy indexes so the new names are free.
ALTER INDEX ts_kv_pkey RENAME TO tenant_ts_kv_pkey_old;
ALTER INDEX idx_tenant_ts_kv RENAME TO idx_tenant_ts_kv_old;
ALTER INDEX tenant_ts_kv_ts_idx RENAME TO tenant_ts_kv_ts_idx_old;
ALTER TABLE ts_kv ADD CONSTRAINT ts_kv_pkey PRIMARY KEY(entity_id, key, ts);
-- CREATE INDEX IF NOT EXISTS ts_kv_ts_idx ON ts_kv(ts DESC);
ALTER TABLE ts_kv DROP COLUMN IF EXISTS tenant_id;
END;
$$;
-- call create_ts_kv_latest_table();
-- Creates the new-format ts_kv_latest table (uuid entity ids, integer keys).
CREATE OR REPLACE PROCEDURE create_ts_kv_latest_table()
    LANGUAGE plpgsql
AS $$
BEGIN
    -- Idempotent: IF NOT EXISTS keeps reruns of the upgrade safe.
    CREATE TABLE IF NOT EXISTS ts_kv_latest
    (
        entity_id uuid   NOT NULL,
        key       int    NOT NULL,
        ts        bigint NOT NULL,
        bool_v    boolean,
        str_v     varchar(10000000),
        long_v    bigint,
        dbl_v     double precision,
        CONSTRAINT ts_kv_latest_pkey PRIMARY KEY (entity_id, key)
    );
END;
$$;
-- call create_ts_kv_dictionary_table();
-- Creates the key-name -> integer-id lookup table used to shrink ts_kv keys.
CREATE OR REPLACE PROCEDURE create_ts_kv_dictionary_table()
    LANGUAGE plpgsql
AS $$
BEGIN
    -- Idempotent: IF NOT EXISTS keeps reruns of the upgrade safe.
    CREATE TABLE IF NOT EXISTS ts_kv_dictionary
    (
        key    varchar(255) NOT NULL,
        key_id serial UNIQUE,
        CONSTRAINT ts_key_id_pkey PRIMARY KEY (key)
    );
END;
$$;
-- call insert_into_dictionary();
-- Timescale variant: registers every distinct telemetry key found in
-- tenant_ts_kv_old in ts_kv_dictionary, letting the serial key_id column
-- assign integer ids. Keys already present are skipped (re-runnable).
CREATE OR REPLACE PROCEDURE insert_into_dictionary() LANGUAGE plpgsql AS $$
DECLARE
insert_record RECORD;
key_cursor CURSOR FOR SELECT DISTINCT key
FROM tenant_ts_kv_old
ORDER BY key;
BEGIN
OPEN key_cursor;
LOOP
FETCH key_cursor INTO insert_record;
EXIT WHEN NOT FOUND;
IF NOT EXISTS(SELECT key FROM ts_kv_dictionary WHERE key = insert_record.key) THEN
INSERT INTO ts_kv_dictionary(key) VALUES (insert_record.key);
RAISE NOTICE 'Key: % has been inserted into the dictionary!',insert_record.key;
ELSE
RAISE NOTICE 'Key: % already exists in the dictionary!',insert_record.key;
END IF;
END LOOP;
CLOSE key_cursor;
END;
$$;
-- Converts ThingsBoard's legacy hex entity-id string into a standard UUID by
-- reordering its character groups; the literal '-1' supplies the version
-- nibble. NOTE(review): layout assumed from this reordering -- confirm
-- against the Java UUIDConverter before reuse.
CREATE OR REPLACE FUNCTION to_uuid(IN entity_id varchar, OUT uuid_id uuid) AS
$$
BEGIN
uuid_id := substring(entity_id, 8, 8) || '-' || substring(entity_id, 4, 4) || '-1' || substring(entity_id, 1, 3) ||
'-' || substring(entity_id, 16, 4) || '-' || substring(entity_id, 20, 12);
END;
$$ LANGUAGE plpgsql;
-- call insert_into_ts_kv();
-- Timescale variant: bulk-migrates tenant_ts_kv_old into the new ts_kv via a
-- server-side file. path_to_file must be accessible to the PostgreSQL server
-- process; %L quotes it safely.
CREATE OR REPLACE PROCEDURE insert_into_ts_kv(IN path_to_file varchar) LANGUAGE plpgsql AS $$
BEGIN
-- Export: entity_id converted to uuid, key replaced by its dictionary id.
EXECUTE format ('COPY (SELECT to_uuid(entity_id) AS entity_id,
new_ts_kv_records.key AS key,
new_ts_kv_records.ts AS ts,
new_ts_kv_records.bool_v AS bool_v,
new_ts_kv_records.str_v AS str_v,
new_ts_kv_records.long_v AS long_v,
new_ts_kv_records.dbl_v AS dbl_v
FROM (SELECT entity_id AS entity_id,
key_id AS key,
ts,
bool_v,
str_v,
long_v,
dbl_v
FROM tenant_ts_kv_old
INNER JOIN ts_kv_dictionary ON (tenant_ts_kv_old.key = ts_kv_dictionary.key)) AS new_ts_kv_records) TO %L;', path_to_file);
-- Import into the new table.
EXECUTE format ('COPY ts_kv FROM %L', path_to_file);
END;
$$;
-- call insert_into_ts_kv_latest();
-- Populates ts_kv_latest by picking, for every (key, entity_id) pair in the
-- migrated ts_kv table, the row with the maximum ts and copying it over.
-- Progress is reported via NOTICE every insert_size rows.
CREATE OR REPLACE PROCEDURE insert_into_ts_kv_latest() LANGUAGE plpgsql AS $$
DECLARE
    insert_size CONSTANT integer := 10000;
    insert_counter integer DEFAULT 0;
    latest_record RECORD;
    insert_record RECORD;
    -- GROUP BY already yields one row per (key, entity_id); the redundant
    -- DISTINCT that used to precede the select list was removed (no-op).
    insert_cursor CURSOR FOR SELECT
                                 latest_records.key AS key,
                                 latest_records.entity_id AS entity_id,
                                 latest_records.ts AS ts
                             FROM (SELECT key AS key, entity_id AS entity_id, MAX(ts) AS ts FROM ts_kv GROUP BY key, entity_id) AS latest_records;
BEGIN
    OPEN insert_cursor;
    LOOP
        insert_counter := insert_counter + 1;
        FETCH insert_cursor INTO latest_record;
        IF NOT FOUND THEN
            -- The counter was already incremented for the failed fetch, hence -1.
            RAISE NOTICE '% records have been inserted into the ts_kv_latest table!',insert_counter - 1;
            EXIT;
        END IF;
        -- Load the full row holding the latest timestamp for this pair.
        SELECT entity_id AS entity_id, key AS key, ts AS ts, bool_v AS bool_v, str_v AS str_v, long_v AS long_v, dbl_v AS dbl_v INTO insert_record FROM ts_kv WHERE entity_id = latest_record.entity_id AND key = latest_record.key AND ts = latest_record.ts;
        INSERT INTO ts_kv_latest(entity_id, key, ts, bool_v, str_v, long_v, dbl_v)
        VALUES (insert_record.entity_id, insert_record.key, insert_record.ts, insert_record.bool_v, insert_record.str_v, insert_record.long_v, insert_record.dbl_v);
        IF MOD(insert_counter, insert_size) = 0 THEN
            RAISE NOTICE '% records have been inserted into the ts_kv_latest table!',insert_counter;
        END IF;
    END LOOP;
    CLOSE insert_cursor;
END;
$$;
-- call insert_into_ts_kv_cursor();
-- Timescale variant, row-by-row alternative to insert_into_ts_kv for
-- deployments where server-side COPY to a file is not possible. Streams
-- converted rows from tenant_ts_kv_old into the new ts_kv, reporting
-- progress every insert_size rows via NOTICE.
CREATE OR REPLACE PROCEDURE insert_into_ts_kv_cursor() LANGUAGE plpgsql AS $$
DECLARE
insert_size CONSTANT integer := 10000;
insert_counter integer DEFAULT 0;
insert_record RECORD;
insert_cursor CURSOR FOR SELECT to_uuid(entity_id) AS entity_id,
new_ts_kv_records.key AS key,
new_ts_kv_records.ts AS ts,
new_ts_kv_records.bool_v AS bool_v,
new_ts_kv_records.str_v AS str_v,
new_ts_kv_records.long_v AS long_v,
new_ts_kv_records.dbl_v AS dbl_v
FROM (SELECT entity_id AS entity_id,
key_id AS key,
ts,
bool_v,
str_v,
long_v,
dbl_v
FROM tenant_ts_kv_old
INNER JOIN ts_kv_dictionary ON (tenant_ts_kv_old.key = ts_kv_dictionary.key)) AS new_ts_kv_records;
BEGIN
OPEN insert_cursor;
LOOP
insert_counter := insert_counter + 1;
FETCH insert_cursor INTO insert_record;
IF NOT FOUND THEN
-- The counter was already incremented for the failed fetch, hence -1.
RAISE NOTICE '% records have been inserted into the new ts_kv table!',insert_counter - 1;
EXIT;
END IF;
INSERT INTO ts_kv(entity_id, key, ts, bool_v, str_v, long_v, dbl_v)
VALUES (insert_record.entity_id, insert_record.key, insert_record.ts, insert_record.bool_v, insert_record.str_v,
insert_record.long_v, insert_record.dbl_v);
IF MOD(insert_counter, insert_size) = 0 THEN
RAISE NOTICE '% records have been inserted into the new ts_kv table!',insert_counter;
END IF;
END LOOP;
CLOSE insert_cursor;
END;
$$;

150
application/src/main/data/upgrade/2.4.3/schema_update_ttl.sql

@ -1,150 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Converts ThingsBoard's legacy hex entity-id string into a standard UUID by
-- reordering its character groups; the literal '-1' supplies the version
-- nibble. NOTE(review): layout assumed from this reordering -- confirm
-- against the Java UUIDConverter before reuse.
CREATE OR REPLACE FUNCTION to_uuid(IN entity_id varchar, OUT uuid_id uuid) AS
$$
BEGIN
uuid_id := substring(entity_id, 8, 8) || '-' || substring(entity_id, 4, 4) || '-1' || substring(entity_id, 1, 3) ||
'-' || substring(entity_id, 16, 4) || '-' || substring(entity_id, 20, 12);
END;
$$ LANGUAGE plpgsql;
-- Deletes ts_kv rows older than the ttl cutoff (epoch millis) for all devices
-- of the given tenant/customer pair; returns the number of deleted rows.
-- Values are interpolated with %L, so they are safely quoted.
CREATE OR REPLACE FUNCTION delete_device_records_from_ts_kv(tenant_id uuid, customer_id uuid, ttl bigint,
OUT deleted bigint) AS
$$
BEGIN
EXECUTE format(
'WITH deleted AS (DELETE FROM ts_kv WHERE entity_id IN (SELECT device.id as entity_id FROM device WHERE tenant_id = %L and customer_id = %L) AND ts < %L::bigint RETURNING *) SELECT count(*) FROM deleted',
tenant_id, customer_id, ttl) into deleted;
END;
$$ LANGUAGE plpgsql;
-- Deletes ts_kv rows older than the ttl cutoff (epoch millis) for all assets
-- of the given tenant/customer pair; returns the number of deleted rows.
CREATE OR REPLACE FUNCTION delete_asset_records_from_ts_kv(tenant_id uuid, customer_id uuid, ttl bigint,
OUT deleted bigint) AS
$$
BEGIN
EXECUTE format(
'WITH deleted AS (DELETE FROM ts_kv WHERE entity_id IN (SELECT asset.id as entity_id FROM asset WHERE tenant_id = %L and customer_id = %L) AND ts < %L::bigint RETURNING *) SELECT count(*) FROM deleted',
tenant_id, customer_id, ttl) into deleted;
END;
$$ LANGUAGE plpgsql;
-- Deletes ts_kv rows older than the ttl cutoff (epoch millis) recorded
-- directly against the customer entity itself (note: matches customer.id,
-- unlike the device/asset variants); returns the number of deleted rows.
CREATE OR REPLACE FUNCTION delete_customer_records_from_ts_kv(tenant_id uuid, customer_id uuid, ttl bigint,
OUT deleted bigint) AS
$$
BEGIN
EXECUTE format(
'WITH deleted AS (DELETE FROM ts_kv WHERE entity_id IN (SELECT customer.id as entity_id FROM customer WHERE tenant_id = %L and id = %L) AND ts < %L::bigint RETURNING *) SELECT count(*) FROM deleted',
tenant_id, customer_id, ttl) into deleted;
END;
$$ LANGUAGE plpgsql;
-- Removes expired telemetry for every tenant and customer. system_ttl
-- (seconds) is the fallback when a tenant has no 'TTL' attribute; a
-- customer's own 'TTL' attribute overrides the tenant cutoff. The total
-- number of removed rows is accumulated into the INOUT 'deleted' argument.
-- Bug fix: tenant_ttl_ts / customer_ttl_ts are now reset at the top of each
-- iteration. Previously they kept the value computed for the PREVIOUS
-- tenant/customer whenever the current TTL was <= 0, so data could be
-- deleted using another entity's cutoff.
CREATE OR REPLACE PROCEDURE cleanup_timeseries_by_ttl(IN null_uuid uuid,
                                                      IN system_ttl bigint, INOUT deleted bigint)
    LANGUAGE plpgsql AS
$$
DECLARE
    tenant_cursor CURSOR FOR select tenant.id as tenant_id
                             from tenant;
    tenant_id_record     uuid;
    customer_id_record   uuid;
    tenant_ttl           bigint;
    customer_ttl         bigint;
    deleted_for_entities bigint;
    tenant_ttl_ts        bigint;
    customer_ttl_ts      bigint;
BEGIN
    OPEN tenant_cursor;
    FETCH tenant_cursor INTO tenant_id_record;
    WHILE FOUND
        LOOP
            -- Reset per-tenant state so a stale cutoff never leaks into a
            -- tenant whose TTL is disabled (<= 0).
            tenant_ttl_ts := NULL;
            EXECUTE format(
                    'select attribute_kv.long_v from attribute_kv where attribute_kv.entity_id = %L and attribute_kv.attribute_key = %L',
                    tenant_id_record, 'TTL') INTO tenant_ttl;
            if tenant_ttl IS NULL THEN
                tenant_ttl := system_ttl;
            END IF;
            IF tenant_ttl > 0 THEN
                -- Cutoff in epoch milliseconds.
                tenant_ttl_ts := (EXTRACT(EPOCH FROM current_timestamp) * 1000 - tenant_ttl::bigint * 1000)::bigint;
                deleted_for_entities := delete_device_records_from_ts_kv(tenant_id_record, null_uuid, tenant_ttl_ts);
                deleted := deleted + deleted_for_entities;
                RAISE NOTICE '% telemetry removed for devices where tenant_id = %', deleted_for_entities, tenant_id_record;
                deleted_for_entities := delete_asset_records_from_ts_kv(tenant_id_record, null_uuid, tenant_ttl_ts);
                deleted := deleted + deleted_for_entities;
                RAISE NOTICE '% telemetry removed for assets where tenant_id = %', deleted_for_entities, tenant_id_record;
            END IF;
            FOR customer_id_record IN
                SELECT customer.id AS customer_id FROM customer WHERE customer.tenant_id = tenant_id_record
                LOOP
                    -- Reset per-customer state (see bug-fix note above).
                    customer_ttl_ts := NULL;
                    EXECUTE format(
                            'select attribute_kv.long_v from attribute_kv where attribute_kv.entity_id = %L and attribute_kv.attribute_key = %L',
                            customer_id_record, 'TTL') INTO customer_ttl;
                    IF customer_ttl IS NULL THEN
                        -- No override: inherit the tenant cutoff (may be NULL).
                        customer_ttl_ts := tenant_ttl_ts;
                    ELSE
                        IF customer_ttl > 0 THEN
                            customer_ttl_ts :=
                                    (EXTRACT(EPOCH FROM current_timestamp) * 1000 -
                                     customer_ttl::bigint * 1000)::bigint;
                        END IF;
                    END IF;
                    IF customer_ttl_ts IS NOT NULL AND customer_ttl_ts > 0 THEN
                        deleted_for_entities :=
                                delete_customer_records_from_ts_kv(tenant_id_record, customer_id_record,
                                                                   customer_ttl_ts);
                        deleted := deleted + deleted_for_entities;
                        RAISE NOTICE '% telemetry removed for customer with id = % where tenant_id = %', deleted_for_entities, customer_id_record, tenant_id_record;
                        deleted_for_entities :=
                                delete_device_records_from_ts_kv(tenant_id_record, customer_id_record,
                                                                 customer_ttl_ts);
                        deleted := deleted + deleted_for_entities;
                        RAISE NOTICE '% telemetry removed for devices where tenant_id = % and customer_id = %', deleted_for_entities, tenant_id_record, customer_id_record;
                        deleted_for_entities := delete_asset_records_from_ts_kv(tenant_id_record,
                                                                                customer_id_record,
                                                                                customer_ttl_ts);
                        deleted := deleted + deleted_for_entities;
                        RAISE NOTICE '% telemetry removed for assets where tenant_id = % and customer_id = %', deleted_for_entities, tenant_id_record, customer_id_record;
                    END IF;
                END LOOP;
            FETCH tenant_cursor INTO tenant_id_record;
        END LOOP;
END;
$$;
-- Deletes expired rows from the event table. Regular events use 'ttl' and
-- debug events (DEBUG_RULE_NODE / DEBUG_RULE_CHAIN) use 'debug_ttl', both in
-- seconds; a non-positive ttl disables that pass. The combined count is
-- written to the INOUT 'deleted' argument.
CREATE OR REPLACE PROCEDURE cleanup_events_by_ttl(IN ttl bigint, IN debug_ttl bigint, INOUT deleted bigint)
LANGUAGE plpgsql AS
$$
DECLARE
ttl_ts bigint;
debug_ttl_ts bigint;
ttl_deleted_count bigint DEFAULT 0;
debug_ttl_deleted_count bigint DEFAULT 0;
BEGIN
IF ttl > 0 THEN
-- Cutoff in epoch milliseconds; non-debug events only.
ttl_ts := (EXTRACT(EPOCH FROM current_timestamp) * 1000 - ttl::bigint * 1000)::bigint;
EXECUTE format(
'WITH deleted AS (DELETE FROM event WHERE ts < %L::bigint AND (event_type != %L::varchar AND event_type != %L::varchar) RETURNING *) SELECT count(*) FROM deleted', ttl_ts, 'DEBUG_RULE_NODE', 'DEBUG_RULE_CHAIN') into ttl_deleted_count;
END IF;
IF debug_ttl > 0 THEN
-- Separate, usually shorter retention for debug events.
debug_ttl_ts := (EXTRACT(EPOCH FROM current_timestamp) * 1000 - debug_ttl::bigint * 1000)::bigint;
EXECUTE format(
'WITH deleted AS (DELETE FROM event WHERE ts < %L::bigint AND (event_type = %L::varchar OR event_type = %L::varchar) RETURNING *) SELECT count(*) FROM deleted', debug_ttl_ts, 'DEBUG_RULE_NODE', 'DEBUG_RULE_CHAIN') into debug_ttl_deleted_count;
END IF;
RAISE NOTICE 'Events removed by ttl: %', ttl_deleted_count;
RAISE NOTICE 'Debug Events removed by ttl: %', debug_ttl_deleted_count;
deleted := ttl_deleted_count + debug_ttl_deleted_count;
END
$$;

878
application/src/main/data/upgrade/3.0.1/schema_update_to_uuid.sql

@ -1,878 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Converts ThingsBoard's legacy hex entity-id string into a standard UUID by
-- reordering its character groups; the literal '-1' supplies the version
-- nibble. NOTE(review): layout assumed from this reordering -- confirm
-- against the Java UUIDConverter before reuse.
CREATE OR REPLACE FUNCTION to_uuid(IN entity_id varchar, OUT uuid_id uuid) AS
$$
BEGIN
uuid_id := substring(entity_id, 8, 8) || '-' || substring(entity_id, 4, 4) || '-1' || substring(entity_id, 1, 3) ||
'-' || substring(entity_id, 16, 4) || '-' || substring(entity_id, 20, 12);
END;
$$ LANGUAGE plpgsql;
-- Extracts the creation timestamp (epoch milliseconds) embedded in a
-- time-based (version 1) UUID. uuid_send yields the 16 raw bytes;
-- time_low / time_mid / time_hi are reassembled into 100-ns ticks since the
-- Gregorian epoch (1582-10-15), shifted by the Gregorian-to-Unix offset
-- (122192928000000000 ticks) and divided by 10000 ticks-per-ms.
CREATE OR REPLACE FUNCTION extract_ts(uuid UUID) RETURNS BIGINT AS
$$
DECLARE
bytes bytea;
BEGIN
bytes := uuid_send(uuid);
RETURN
(
(
-- time_low: bytes 0-3.
(get_byte(bytes, 0)::bigint << 24) |
(get_byte(bytes, 1)::bigint << 16) |
(get_byte(bytes, 2)::bigint << 8) |
(get_byte(bytes, 3)::bigint << 0)
) + (
-- time_mid: bytes 4-5, shifted above time_low.
((get_byte(bytes, 4)::bigint << 8 |
get_byte(bytes, 5)::bigint)) << 32
) + (
-- time_hi: bytes 6-7 with the 4 version bits masked off.
(((get_byte(bytes, 6)::bigint & 15) << 8 | get_byte(bytes, 7)::bigint) & 4095) << 48
) - 122192928000000000
) / 10000::double precision;
END
$$ LANGUAGE plpgsql
IMMUTABLE
PARALLEL SAFE
RETURNS NULL ON NULL INPUT;
-- Rewrites a varchar id column to uuid in place: rename aside, add a uuid
-- column with the original name, backfill via to_uuid(), drop the old column.
-- NOTE(review): identifiers are interpolated with %s (not %I), so this is
-- only safe for the trusted, hard-coded names this upgrade script passes.
CREATE OR REPLACE FUNCTION column_type_to_uuid(table_name varchar, column_name varchar) RETURNS VOID
LANGUAGE plpgsql AS
$$
BEGIN
execute format('ALTER TABLE %s RENAME COLUMN %s TO old_%s;', table_name, column_name, column_name);
execute format('ALTER TABLE %s ADD COLUMN %s UUID;', table_name, column_name);
execute format('UPDATE %s SET %s = to_uuid(old_%s) WHERE old_%s is not null;', table_name, column_name, column_name, column_name);
execute format('ALTER TABLE %s DROP COLUMN old_%s;', table_name, column_name);
END;
$$;
-- Looks up a column's data_type from information_schema; used by the
-- update_* procedures to decide whether a column still needs conversion
-- ('character varying') or was already migrated. %L quotes the values safely.
CREATE OR REPLACE FUNCTION get_column_type(table_name varchar, column_name varchar, OUT data_type varchar) RETURNS varchar
LANGUAGE plpgsql AS
$$
BEGIN
execute (format('SELECT data_type from information_schema.columns where table_name = %L and column_name = %L',
table_name, column_name)) INTO data_type;
END;
$$;
-- Drops every index that the uuid column-type migration would otherwise have
-- to rewrite; create_all_idx() restores them afterwards.
CREATE OR REPLACE PROCEDURE drop_all_idx()
    LANGUAGE plpgsql
AS $$
BEGIN
    -- A single multi-name DROP INDEX; IF EXISTS applies to each name.
    DROP INDEX IF EXISTS
        idx_alarm_originator_alarm_type,
        idx_alarm_originator_created_time,
        idx_alarm_tenant_created_time,
        idx_event_type_entity_id,
        idx_relation_to_id,
        idx_relation_from_id,
        idx_device_customer_id,
        idx_device_customer_id_and_type,
        idx_device_type,
        idx_asset_customer_id,
        idx_asset_customer_id_and_type,
        idx_asset_type,
        idx_attribute_kv_by_key_and_last_update_ts;
END;
$$;
-- Recreates the indexes removed by drop_all_idx() once the uuid column-type
-- migration has finished. Idempotent via IF NOT EXISTS.
CREATE OR REPLACE PROCEDURE create_all_idx()
    LANGUAGE plpgsql
AS $$
BEGIN
    CREATE INDEX IF NOT EXISTS idx_alarm_originator_alarm_type
        ON alarm (originator_id, type, start_ts DESC);
    CREATE INDEX IF NOT EXISTS idx_alarm_originator_created_time
        ON alarm (originator_id, created_time DESC);
    CREATE INDEX IF NOT EXISTS idx_alarm_tenant_created_time
        ON alarm (tenant_id, created_time DESC);
    CREATE INDEX IF NOT EXISTS idx_event_type_entity_id
        ON event (tenant_id, event_type, entity_type, entity_id);
    CREATE INDEX IF NOT EXISTS idx_relation_to_id
        ON relation (relation_type_group, to_type, to_id);
    CREATE INDEX IF NOT EXISTS idx_relation_from_id
        ON relation (relation_type_group, from_type, from_id);
    CREATE INDEX IF NOT EXISTS idx_device_customer_id
        ON device (tenant_id, customer_id);
    CREATE INDEX IF NOT EXISTS idx_device_customer_id_and_type
        ON device (tenant_id, customer_id, type);
    CREATE INDEX IF NOT EXISTS idx_device_type
        ON device (tenant_id, type);
    CREATE INDEX IF NOT EXISTS idx_asset_customer_id
        ON asset (tenant_id, customer_id);
    CREATE INDEX IF NOT EXISTS idx_asset_customer_id_and_type
        ON asset (tenant_id, customer_id, type);
    CREATE INDEX IF NOT EXISTS idx_asset_type
        ON asset (tenant_id, type);
    CREATE INDEX IF NOT EXISTS idx_attribute_kv_by_key_and_last_update_ts
        ON attribute_kv (entity_id, attribute_key, last_update_ts desc);
END;
$$;
-- admin_settings
-- Migrates admin_settings.id from varchar to uuid and adds created_time
-- extracted from the time-based id. Idempotent: the data_type check makes a
-- second run a no-op.
CREATE OR REPLACE PROCEDURE update_admin_settings()
LANGUAGE plpgsql AS
$$
DECLARE
data_type varchar;
table_name varchar := 'admin_settings';
column_id varchar := 'id';
BEGIN
data_type := get_column_type(table_name, column_id);
IF data_type = 'character varying' THEN
-- Drop the pkey while the column type changes, then restore it.
ALTER TABLE admin_settings DROP CONSTRAINT admin_settings_pkey;
PERFORM column_type_to_uuid(table_name, column_id);
ALTER TABLE admin_settings ADD CONSTRAINT admin_settings_pkey PRIMARY KEY (id);
ALTER TABLE admin_settings ADD COLUMN created_time BIGINT;
UPDATE admin_settings SET created_time = extract_ts(id) WHERE id is not null;
RAISE NOTICE 'Table % column % updated!', table_name, column_id;
ELSE
RAISE NOTICE 'Table % column % already updated!', table_name, column_id;
END IF;
END;
$$;
-- alarm
-- Migrates alarm id columns (id, originator_id, tenant_id) from varchar to
-- uuid and adds created_time extracted from the time-based id. Each column is
-- guarded by a data_type check, so the procedure is idempotent.
CREATE OR REPLACE PROCEDURE update_alarm()
LANGUAGE plpgsql AS
$$
DECLARE
data_type varchar;
table_name varchar := 'alarm';
column_id varchar := 'id';
column_originator_id varchar := 'originator_id';
column_tenant_id varchar := 'tenant_id';
BEGIN
data_type := get_column_type(table_name, column_id);
IF data_type = 'character varying' THEN
-- Drop the pkey while the column type changes, then restore it.
ALTER TABLE alarm DROP CONSTRAINT alarm_pkey;
PERFORM column_type_to_uuid(table_name, column_id);
ALTER TABLE alarm ADD COLUMN created_time BIGINT;
UPDATE alarm SET created_time = extract_ts(id) WHERE id is not null;
ALTER TABLE alarm ADD CONSTRAINT alarm_pkey PRIMARY KEY (id);
RAISE NOTICE 'Table % column % updated!', table_name, column_id;
ELSE
RAISE NOTICE 'Table % column % already updated!', table_name, column_id;
END IF;
data_type := get_column_type(table_name, column_originator_id);
IF data_type = 'character varying' THEN
PERFORM column_type_to_uuid(table_name, column_originator_id);
RAISE NOTICE 'Table % column % updated!', table_name, column_originator_id;
ELSE
RAISE NOTICE 'Table % column % already updated!', table_name, column_originator_id;
END IF;
data_type := get_column_type(table_name, column_tenant_id);
IF data_type = 'character varying' THEN
PERFORM column_type_to_uuid(table_name, column_tenant_id);
RAISE NOTICE 'Table % column % updated!', table_name, column_tenant_id;
ELSE
RAISE NOTICE 'Table % column % already updated!', table_name, column_tenant_id;
END IF;
END;
$$;
-- asset
-- Migrates asset id columns (id, customer_id, tenant_id) from varchar to
-- uuid, adds created_time extracted from the time-based id, and rebuilds the
-- (tenant_id, name) unique constraint around the tenant_id conversion. Each
-- column is guarded by a data_type check, so the procedure is idempotent.
CREATE OR REPLACE PROCEDURE update_asset()
LANGUAGE plpgsql AS
$$
DECLARE
data_type varchar;
table_name varchar := 'asset';
column_id varchar := 'id';
column_customer_id varchar := 'customer_id';
column_tenant_id varchar := 'tenant_id';
BEGIN
data_type := get_column_type(table_name, column_id);
IF data_type = 'character varying' THEN
-- Drop the pkey while the column type changes, then restore it.
ALTER TABLE asset DROP CONSTRAINT asset_pkey;
PERFORM column_type_to_uuid(table_name, column_id);
ALTER TABLE asset ADD COLUMN created_time BIGINT;
UPDATE asset SET created_time = extract_ts(id) WHERE id is not null;
ALTER TABLE asset ADD CONSTRAINT asset_pkey PRIMARY KEY (id);
RAISE NOTICE 'Table % column % updated!', table_name, column_id;
ELSE
RAISE NOTICE 'Table % column % already updated!', table_name, column_id;
END IF;
data_type := get_column_type(table_name, column_customer_id);
IF data_type = 'character varying' THEN
PERFORM column_type_to_uuid(table_name, column_customer_id);
RAISE NOTICE 'Table % column % updated!', table_name, column_customer_id;
ELSE
RAISE NOTICE 'Table % column % already updated!', table_name, column_customer_id;
END IF;
data_type := get_column_type(table_name, column_tenant_id);
IF data_type = 'character varying' THEN
-- The unique key references tenant_id, so it is rebuilt around the change.
ALTER TABLE asset DROP CONSTRAINT asset_name_unq_key;
PERFORM column_type_to_uuid(table_name, column_tenant_id);
ALTER TABLE asset ADD CONSTRAINT asset_name_unq_key UNIQUE (tenant_id, name);
RAISE NOTICE 'Table % column % updated!', table_name, column_tenant_id;
ELSE
RAISE NOTICE 'Table % column % already updated!', table_name, column_tenant_id;
END IF;
END;
$$;
-- attribute_kv
-- Migrates attribute_kv.entity_id from varchar to uuid, rebuilding the
-- composite primary key around the conversion. Idempotent via the data_type
-- check.
CREATE OR REPLACE PROCEDURE update_attribute_kv()
LANGUAGE plpgsql AS
$$
DECLARE
data_type varchar;
table_name varchar := 'attribute_kv';
column_entity_id varchar := 'entity_id';
BEGIN
data_type := get_column_type(table_name, column_entity_id);
IF data_type = 'character varying' THEN
ALTER TABLE attribute_kv DROP CONSTRAINT attribute_kv_pkey;
PERFORM column_type_to_uuid(table_name, column_entity_id);
ALTER TABLE attribute_kv ADD CONSTRAINT attribute_kv_pkey PRIMARY KEY (entity_type, entity_id, attribute_type, attribute_key);
RAISE NOTICE 'Table % column % updated!', table_name, column_entity_id;
ELSE
RAISE NOTICE 'Table % column % already updated!', table_name, column_entity_id;
END IF;
END;
$$;
-- audit_log
-- Migrates all id-bearing columns of audit_log (id, customer_id, tenant_id,
-- entity_id, user_id) from varchar to uuid. Each column is converted only
-- while it is still 'character varying', so a partially-completed run can be
-- retried without error.
CREATE OR REPLACE PROCEDURE update_audit_log()
LANGUAGE plpgsql AS
$$
DECLARE
    data_type varchar;  -- current type of the column being inspected
    table_name varchar := 'audit_log';
    column_id varchar := 'id';
    column_customer_id varchar := 'customer_id';
    column_tenant_id varchar := 'tenant_id';
    column_entity_id varchar := 'entity_id';
    column_user_id varchar := 'user_id';
BEGIN
    -- id: the PK must be dropped around the in-place type rewrite.
    data_type := get_column_type(table_name, column_id);
    IF data_type = 'character varying' THEN
        ALTER TABLE audit_log DROP CONSTRAINT audit_log_pkey;
        PERFORM column_type_to_uuid(table_name, column_id);
        -- created_time is backfilled from the timestamp embedded in the old
        -- time-based uuid (extract_ts is a helper defined elsewhere in this
        -- migration).
        ALTER TABLE audit_log ADD COLUMN created_time BIGINT;
        UPDATE audit_log SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE audit_log ADD CONSTRAINT audit_log_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_id;
    END IF;
    -- Remaining columns are plain (non-constrained) uuid conversions.
    data_type := get_column_type(table_name, column_customer_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_customer_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_customer_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_customer_id;
    END IF;
    data_type := get_column_type(table_name, column_tenant_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_tenant_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_tenant_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_tenant_id;
    END IF;
    data_type := get_column_type(table_name, column_entity_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_entity_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_entity_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_entity_id;
    END IF;
    data_type := get_column_type(table_name, column_user_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_user_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_user_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_user_id;
    END IF;
END;
$$;
-- component_descriptor
-- Migrates component_descriptor.id from varchar to uuid and backfills
-- created_time from the timestamp embedded in the old time-based uuid.
-- Idempotent: the conversion runs only while the column is still varchar.
CREATE OR REPLACE PROCEDURE update_component_descriptor()
    LANGUAGE plpgsql AS
$$
DECLARE
    data_type varchar;  -- current type of the id column
    table_name varchar := 'component_descriptor';
    column_id varchar := 'id';
BEGIN
    data_type := get_column_type(table_name, column_id);
    IF data_type = 'character varying' THEN
        ALTER TABLE component_descriptor DROP CONSTRAINT component_descriptor_pkey;
        PERFORM column_type_to_uuid(table_name, column_id);
        -- Backfill created_time before restoring the PK, matching the order
        -- used by the other update_* procedures in this migration (the
        -- original re-added the PK first, which was harmless but inconsistent).
        ALTER TABLE component_descriptor ADD COLUMN created_time BIGINT;
        UPDATE component_descriptor SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE component_descriptor ADD CONSTRAINT component_descriptor_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_id;
    END IF;
END;
$$;
-- customer
-- Migrates customer.id and customer.tenant_id from varchar to uuid and
-- backfills created_time from the old time-based uuid id.
-- Idempotent: each column is converted only while it is still varchar.
CREATE OR REPLACE PROCEDURE update_customer()
    LANGUAGE plpgsql AS
$$
DECLARE
    data_type varchar;  -- current type of the column being inspected
    table_name varchar := 'customer';
    column_id varchar := 'id';
    column_tenant_id varchar := 'tenant_id';
BEGIN
    data_type := get_column_type(table_name, column_id);
    IF data_type = 'character varying' THEN
        ALTER TABLE customer DROP CONSTRAINT customer_pkey;
        PERFORM column_type_to_uuid(table_name, column_id);
        -- Backfill created_time before restoring the PK, matching the order
        -- used by the other update_* procedures in this migration (the
        -- original re-added the PK first, which was harmless but inconsistent).
        ALTER TABLE customer ADD COLUMN created_time BIGINT;
        UPDATE customer SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE customer ADD CONSTRAINT customer_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_id;
    END IF;
    data_type := get_column_type(table_name, column_tenant_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_tenant_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_tenant_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_tenant_id;
    END IF;
END;
$$;
-- dashboard
-- Migrates dashboard.id and dashboard.tenant_id from varchar to uuid and
-- backfills created_time from the old time-based uuid id.
-- Idempotent: each column is converted only while it is still varchar.
CREATE OR REPLACE PROCEDURE update_dashboard()
    LANGUAGE plpgsql AS
$$
DECLARE
    data_type varchar;  -- current type of the column being inspected
    table_name varchar := 'dashboard';
    column_id varchar := 'id';
    column_tenant_id varchar := 'tenant_id';
BEGIN
    data_type := get_column_type(table_name, column_id);
    IF data_type = 'character varying' THEN
        ALTER TABLE dashboard DROP CONSTRAINT dashboard_pkey;
        PERFORM column_type_to_uuid(table_name, column_id);
        -- Backfill created_time before restoring the PK, matching the order
        -- used by the other update_* procedures in this migration (the
        -- original re-added the PK first, which was harmless but inconsistent).
        ALTER TABLE dashboard ADD COLUMN created_time BIGINT;
        UPDATE dashboard SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE dashboard ADD CONSTRAINT dashboard_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_id;
    END IF;
    data_type := get_column_type(table_name, column_tenant_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_tenant_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_tenant_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_tenant_id;
    END IF;
END;
$$;
-- device
-- Migrates device.id, device.customer_id and device.tenant_id from varchar
-- to uuid, and backfills created_time from the old time-based uuid id.
-- Idempotent: each column is converted only while it is still varchar.
CREATE OR REPLACE PROCEDURE update_device()
LANGUAGE plpgsql AS
$$
DECLARE
    data_type varchar;  -- current type of the column being inspected
    table_name varchar := 'device';
    column_id varchar := 'id';
    column_customer_id varchar := 'customer_id';
    column_tenant_id varchar := 'tenant_id';
BEGIN
    -- id: the PK must be dropped around the in-place type rewrite.
    data_type := get_column_type(table_name, column_id);
    IF data_type = 'character varying' THEN
        ALTER TABLE device DROP CONSTRAINT device_pkey;
        PERFORM column_type_to_uuid(table_name, column_id);
        ALTER TABLE device ADD COLUMN created_time BIGINT;
        UPDATE device SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE device ADD CONSTRAINT device_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_id;
    END IF;
    data_type := get_column_type(table_name, column_customer_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_customer_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_customer_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_customer_id;
    END IF;
    -- tenant_id participates in the (tenant_id, name) unique key, which must
    -- be dropped around the type change and restored afterwards.
    data_type := get_column_type(table_name, column_tenant_id);
    IF data_type = 'character varying' THEN
        ALTER TABLE device DROP CONSTRAINT device_name_unq_key;
        PERFORM column_type_to_uuid(table_name, column_tenant_id);
        ALTER TABLE device ADD CONSTRAINT device_name_unq_key UNIQUE (tenant_id, name);
        RAISE NOTICE 'Table % column % updated!', table_name, column_tenant_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_tenant_id;
    END IF;
END;
$$;
-- device_credentials
-- Migrates device_credentials.id and device_credentials.device_id from
-- varchar to uuid. While re-establishing the device_id uniqueness constraint
-- it removes duplicate credentials, keeping only the newest row per device.
-- Idempotent: each column is converted only while it is still varchar.
CREATE OR REPLACE PROCEDURE update_device_credentials()
LANGUAGE plpgsql AS
$$
DECLARE
    data_type varchar;  -- current type of the column being inspected
    table_name varchar := 'device_credentials';
    column_id varchar := 'id';
    column_device_id varchar := 'device_id';
BEGIN
    data_type := get_column_type(table_name, column_id);
    IF data_type = 'character varying' THEN
        ALTER TABLE device_credentials DROP CONSTRAINT device_credentials_pkey;
        PERFORM column_type_to_uuid(table_name, column_id);
        -- created_time is recovered from the old time-based uuid id.
        ALTER TABLE device_credentials ADD COLUMN created_time BIGINT;
        UPDATE device_credentials SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE device_credentials ADD CONSTRAINT device_credentials_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_id;
    END IF;
    data_type := get_column_type(table_name, column_device_id);
    IF data_type = 'character varying' THEN
        -- IF EXISTS: the constraint may be absent on older installations or
        -- after a previously interrupted run.
        ALTER TABLE device_credentials DROP CONSTRAINT IF EXISTS device_credentials_device_id_unq_key;
        PERFORM column_type_to_uuid(table_name, column_device_id);
        -- remove duplicate credentials with same device_id
        -- (rows are ranked per device by created_time, newest first; every
        -- row after the first — i.e. all older duplicates — is deleted so the
        -- UNIQUE constraint below can be created.)
        DELETE from device_credentials where id in (
            select dc.id
            from (
                SELECT id, device_id,
                ROW_NUMBER() OVER (
                    PARTITION BY
                        device_id
                    ORDER BY
                        created_time DESC
                ) row_num
                FROM
                    device_credentials
            ) as dc
            WHERE dc.row_num > 1
        );
        ALTER TABLE device_credentials ADD CONSTRAINT device_credentials_device_id_unq_key UNIQUE (device_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_device_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_device_id;
    END IF;
END;
$$;
-- event
-- Migrates event.id, event.entity_id and event.tenant_id from varchar to
-- uuid and backfills created_time from the old time-based uuid id. The
-- event_unq_key constraint spans both converted columns, so it is dropped
-- before the conversions and re-created at the end.
-- Idempotent: each column is converted only while it is still varchar.
CREATE OR REPLACE PROCEDURE update_event()
    LANGUAGE plpgsql AS
$$
DECLARE
    data_type varchar;  -- current type of the column being inspected
    table_name varchar := 'event';
    column_id varchar := 'id';
    column_entity_id varchar := 'entity_id';
    column_tenant_id varchar := 'tenant_id';
BEGIN
    data_type := get_column_type(table_name, column_id);
    IF data_type = 'character varying' THEN
        ALTER TABLE event DROP CONSTRAINT event_pkey;
        PERFORM column_type_to_uuid(table_name, column_id);
        ALTER TABLE event ADD COLUMN created_time BIGINT;
        UPDATE event SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE event ADD CONSTRAINT event_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_id;
    END IF;
    -- IF EXISTS so a retry after a failure between this drop and the final
    -- re-add (or an already-dropped constraint) does not abort the procedure;
    -- the original unconditional DROP would error in that situation.
    ALTER TABLE event DROP CONSTRAINT IF EXISTS event_unq_key;
    data_type := get_column_type(table_name, column_entity_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_entity_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_entity_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_entity_id;
    END IF;
    data_type := get_column_type(table_name, column_tenant_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_tenant_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_tenant_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_tenant_id;
    END IF;
    ALTER TABLE event ADD CONSTRAINT event_unq_key UNIQUE (tenant_id, entity_type, entity_id, event_type, event_uid);
END;
$$;
-- relation
-- Migrates relation.from_id and relation.to_id from varchar to uuid. The
-- composite PK contains both columns, so it is dropped before the
-- conversions and re-created at the end.
CREATE OR REPLACE PROCEDURE update_relation()
    LANGUAGE plpgsql AS
$$
DECLARE
    data_type varchar;  -- current type of the column being inspected
    table_name varchar := 'relation';
    column_from_id varchar := 'from_id';
    column_to_id varchar := 'to_id';
BEGIN
    -- IF EXISTS so a retry after a failure between this drop and the final
    -- re-add does not abort the procedure; the original unconditional DROP
    -- would error in that situation.
    ALTER TABLE relation DROP CONSTRAINT IF EXISTS relation_pkey;
    data_type := get_column_type(table_name, column_from_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_from_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_from_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_from_id;
    END IF;
    data_type := get_column_type(table_name, column_to_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_to_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_to_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_to_id;
    END IF;
    ALTER TABLE relation ADD CONSTRAINT relation_pkey PRIMARY KEY (from_id, from_type, relation_type_group, relation_type, to_id, to_type);
END;
$$;
-- tb_user
-- Converts tb_user.id, tb_user.customer_id and tb_user.tenant_id from
-- varchar to uuid, backfilling created_time from the old time-based uuid id.
-- Each column is converted only while still varchar, so re-running after a
-- partial failure is safe.
CREATE OR REPLACE PROCEDURE update_tb_user()
    LANGUAGE plpgsql AS
$$
DECLARE
    current_type    varchar;
    tbl             varchar := 'tb_user';
    id_col          varchar := 'id';
    customer_id_col varchar := 'customer_id';
    tenant_id_col   varchar := 'tenant_id';
BEGIN
    -- id: the PK is dropped around the in-place type change.
    current_type := get_column_type(tbl, id_col);
    IF current_type = 'character varying' THEN
        ALTER TABLE tb_user DROP CONSTRAINT tb_user_pkey;
        PERFORM column_type_to_uuid(tbl, id_col);
        ALTER TABLE tb_user ADD COLUMN created_time BIGINT;
        UPDATE tb_user SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE tb_user ADD CONSTRAINT tb_user_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', tbl, id_col;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', tbl, id_col;
    END IF;
    -- customer_id and tenant_id: plain unconstrained conversions.
    current_type := get_column_type(tbl, customer_id_col);
    IF current_type = 'character varying' THEN
        PERFORM column_type_to_uuid(tbl, customer_id_col);
        RAISE NOTICE 'Table % column % updated!', tbl, customer_id_col;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', tbl, customer_id_col;
    END IF;
    current_type := get_column_type(tbl, tenant_id_col);
    IF current_type = 'character varying' THEN
        PERFORM column_type_to_uuid(tbl, tenant_id_col);
        RAISE NOTICE 'Table % column % updated!', tbl, tenant_id_col;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', tbl, tenant_id_col;
    END IF;
END;
$$;
-- tenant
-- Converts tenant.id from varchar to uuid and backfills created_time from
-- the timestamp embedded in the old time-based uuid. Safe to re-run: the
-- conversion only happens while the column is still varchar.
CREATE OR REPLACE PROCEDURE update_tenant()
    LANGUAGE plpgsql AS
$$
DECLARE
    current_type varchar;
    tbl          varchar := 'tenant';
    id_col       varchar := 'id';
BEGIN
    current_type := get_column_type(tbl, id_col);
    IF current_type = 'character varying' THEN
        -- PK is dropped around the in-place type change and restored after.
        ALTER TABLE tenant DROP CONSTRAINT tenant_pkey;
        PERFORM column_type_to_uuid(tbl, id_col);
        ALTER TABLE tenant ADD COLUMN created_time BIGINT;
        UPDATE tenant SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE tenant ADD CONSTRAINT tenant_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', tbl, id_col;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', tbl, id_col;
    END IF;
END;
$$;
-- user_credentials
-- Migrates user_credentials.id and user_credentials.user_id from varchar to
-- uuid. user_id is handled by a column swap (rename old, add new uuid column
-- with an inline UNIQUE, copy values, drop old) rather than an in-place type
-- change, which re-establishes the uniqueness constraint in the same step.
-- Idempotent: each column is converted only while it is still varchar.
CREATE OR REPLACE PROCEDURE update_user_credentials()
LANGUAGE plpgsql AS
$$
DECLARE
    data_type varchar;  -- current type of the column being inspected
    table_name varchar := 'user_credentials';
    column_id varchar := 'id';
    column_user_id varchar := 'user_id';
BEGIN
    data_type := get_column_type(table_name, column_id);
    IF data_type = 'character varying' THEN
        ALTER TABLE user_credentials DROP CONSTRAINT user_credentials_pkey;
        PERFORM column_type_to_uuid(table_name, column_id);
        -- created_time is recovered from the old time-based uuid id.
        ALTER TABLE user_credentials ADD COLUMN created_time BIGINT;
        UPDATE user_credentials SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE user_credentials ADD CONSTRAINT user_credentials_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_id;
    END IF;
    data_type := get_column_type(table_name, column_user_id);
    IF data_type = 'character varying' THEN
        ALTER TABLE user_credentials DROP CONSTRAINT user_credentials_user_id_key;
        ALTER TABLE user_credentials RENAME COLUMN user_id TO old_user_id;
        -- New uuid column declared UNIQUE inline; note the auto-generated
        -- constraint name will differ from the dropped one.
        ALTER TABLE user_credentials ADD COLUMN user_id UUID UNIQUE;
        -- to_uuid is a helper defined elsewhere in this migration.
        UPDATE user_credentials SET user_id = to_uuid(old_user_id) WHERE old_user_id is not null;
        ALTER TABLE user_credentials DROP COLUMN old_user_id;
        RAISE NOTICE 'Table % column % updated!', table_name, column_user_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_user_id;
    END IF;
END;
$$;
-- widget_type
-- Converts widget_type.id and widget_type.tenant_id from varchar to uuid,
-- backfilling created_time from the old time-based uuid id. Safe to re-run:
-- each column is converted only while it is still varchar.
CREATE OR REPLACE PROCEDURE update_widget_type()
    LANGUAGE plpgsql AS
$$
DECLARE
    current_type  varchar;
    tbl           varchar := 'widget_type';
    id_col        varchar := 'id';
    tenant_id_col varchar := 'tenant_id';
BEGIN
    current_type := get_column_type(tbl, id_col);
    IF current_type = 'character varying' THEN
        -- PK is dropped around the in-place type change and restored after.
        ALTER TABLE widget_type DROP CONSTRAINT widget_type_pkey;
        PERFORM column_type_to_uuid(tbl, id_col);
        ALTER TABLE widget_type ADD COLUMN created_time BIGINT;
        UPDATE widget_type SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE widget_type ADD CONSTRAINT widget_type_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', tbl, id_col;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', tbl, id_col;
    END IF;
    current_type := get_column_type(tbl, tenant_id_col);
    IF current_type = 'character varying' THEN
        PERFORM column_type_to_uuid(tbl, tenant_id_col);
        RAISE NOTICE 'Table % column % updated!', tbl, tenant_id_col;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', tbl, tenant_id_col;
    END IF;
END;
$$;
-- widgets_bundle
-- Migrates widgets_bundle.id and widgets_bundle.tenant_id from varchar to
-- uuid, backfilling created_time from the old time-based uuid id.
-- Idempotent: each column is converted only while it is still varchar.
CREATE OR REPLACE PROCEDURE update_widgets_bundle()
LANGUAGE plpgsql AS
$$
DECLARE
    data_type varchar;  -- current type of the column being inspected
    table_name varchar := 'widgets_bundle';
    column_id varchar := 'id';
    column_tenant_id varchar := 'tenant_id';
BEGIN
    -- id: the PK is dropped around the in-place type rewrite.
    data_type := get_column_type(table_name, column_id);
    IF data_type = 'character varying' THEN
        ALTER TABLE widgets_bundle DROP CONSTRAINT widgets_bundle_pkey;
        PERFORM column_type_to_uuid(table_name, column_id);
        ALTER TABLE widgets_bundle ADD COLUMN created_time BIGINT;
        UPDATE widgets_bundle SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE widgets_bundle ADD CONSTRAINT widgets_bundle_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_id;
    END IF;
    data_type := get_column_type(table_name, column_tenant_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_tenant_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_tenant_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_tenant_id;
    END IF;
END;
$$;
-- rule_chain
-- Migrates rule_chain.id, rule_chain.first_rule_node_id and
-- rule_chain.tenant_id from varchar to uuid, backfilling created_time from
-- the old time-based uuid id.
-- Idempotent: each column is converted only while it is still varchar.
CREATE OR REPLACE PROCEDURE update_rule_chain()
LANGUAGE plpgsql AS
$$
DECLARE
    data_type varchar;  -- current type of the column being inspected
    table_name varchar := 'rule_chain';
    column_id varchar := 'id';
    column_first_rule_node_id varchar := 'first_rule_node_id';
    column_tenant_id varchar := 'tenant_id';
BEGIN
    -- id: the PK is dropped around the in-place type rewrite.
    data_type := get_column_type(table_name, column_id);
    IF data_type = 'character varying' THEN
        ALTER TABLE rule_chain DROP CONSTRAINT rule_chain_pkey;
        PERFORM column_type_to_uuid(table_name, column_id);
        ALTER TABLE rule_chain ADD COLUMN created_time BIGINT;
        UPDATE rule_chain SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE rule_chain ADD CONSTRAINT rule_chain_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_id;
    END IF;
    -- Remaining columns are plain unconstrained conversions.
    data_type := get_column_type(table_name, column_first_rule_node_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_first_rule_node_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_first_rule_node_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_first_rule_node_id;
    END IF;
    data_type := get_column_type(table_name, column_tenant_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_tenant_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_tenant_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_tenant_id;
    END IF;
END;
$$;
-- rule_node
-- Converts rule_node.id and rule_node.rule_chain_id from varchar to uuid,
-- backfilling created_time from the old time-based uuid id. Safe to re-run:
-- each column is converted only while it is still varchar.
CREATE OR REPLACE PROCEDURE update_rule_node()
    LANGUAGE plpgsql AS
$$
DECLARE
    current_type      varchar;
    tbl               varchar := 'rule_node';
    id_col            varchar := 'id';
    rule_chain_id_col varchar := 'rule_chain_id';
BEGIN
    current_type := get_column_type(tbl, id_col);
    IF current_type = 'character varying' THEN
        -- PK is dropped around the in-place type change and restored after.
        ALTER TABLE rule_node DROP CONSTRAINT rule_node_pkey;
        PERFORM column_type_to_uuid(tbl, id_col);
        ALTER TABLE rule_node ADD COLUMN created_time BIGINT;
        UPDATE rule_node SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE rule_node ADD CONSTRAINT rule_node_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', tbl, id_col;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', tbl, id_col;
    END IF;
    current_type := get_column_type(tbl, rule_chain_id_col);
    IF current_type = 'character varying' THEN
        PERFORM column_type_to_uuid(tbl, rule_chain_id_col);
        RAISE NOTICE 'Table % column % updated!', tbl, rule_chain_id_col;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', tbl, rule_chain_id_col;
    END IF;
END;
$$;
-- entity_view
-- Migrates entity_view.id, entity_view.entity_id, entity_view.tenant_id and
-- entity_view.customer_id from varchar to uuid, backfilling created_time
-- from the old time-based uuid id.
-- Idempotent: each column is converted only while it is still varchar.
CREATE OR REPLACE PROCEDURE update_entity_view()
LANGUAGE plpgsql AS
$$
DECLARE
    data_type varchar;  -- current type of the column being inspected
    table_name varchar := 'entity_view';
    column_id varchar := 'id';
    column_entity_id varchar := 'entity_id';
    column_tenant_id varchar := 'tenant_id';
    column_customer_id varchar := 'customer_id';
BEGIN
    -- id: the PK is dropped around the in-place type rewrite.
    data_type := get_column_type(table_name, column_id);
    IF data_type = 'character varying' THEN
        ALTER TABLE entity_view DROP CONSTRAINT entity_view_pkey;
        PERFORM column_type_to_uuid(table_name, column_id);
        ALTER TABLE entity_view ADD COLUMN created_time BIGINT;
        UPDATE entity_view SET created_time = extract_ts(id) WHERE id is not null;
        ALTER TABLE entity_view ADD CONSTRAINT entity_view_pkey PRIMARY KEY (id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_id;
    END IF;
    -- Remaining columns are plain unconstrained conversions.
    data_type := get_column_type(table_name, column_entity_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_entity_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_entity_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_entity_id;
    END IF;
    data_type := get_column_type(table_name, column_tenant_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_tenant_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_tenant_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_tenant_id;
    END IF;
    data_type := get_column_type(table_name, column_customer_id);
    IF data_type = 'character varying' THEN
        PERFORM column_type_to_uuid(table_name, column_customer_id);
        RAISE NOTICE 'Table % column % updated!', table_name, column_customer_id;
    ELSE
        RAISE NOTICE 'Table % column % already updated!', table_name, column_customer_id;
    END IF;
END;
$$;
-- Latest-telemetry table: at most one row per (entity_id, key) pair, holding
-- a single typed value in exactly one of the *_v columns.
CREATE TABLE IF NOT EXISTS ts_kv_latest
(
    entity_id uuid   NOT NULL,
    key       int    NOT NULL,  -- numeric key id (presumably mapped via ts_kv_dictionary — TODO confirm)
    ts        bigint NOT NULL,  -- timestamp of the latest value, epoch millis per the rest of this migration
    bool_v    boolean,
    str_v     varchar(10000000),
    long_v    bigint,
    dbl_v     double precision,
    json_v    json,
    CONSTRAINT ts_kv_latest_pkey PRIMARY KEY (entity_id, key)
);
-- Dictionary mapping telemetry key names to compact integer ids; key_id is
-- auto-assigned (serial) and unique, while the textual key is the PK.
CREATE TABLE IF NOT EXISTS ts_kv_dictionary
(
    key    varchar(255) NOT NULL,
    key_id serial UNIQUE,
    CONSTRAINT ts_key_id_pkey PRIMARY KEY (key)
);

17
application/src/main/data/upgrade/3.1.0/schema_update.sql

@@ -1,17 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
CREATE INDEX IF NOT EXISTS idx_alarm_tenant_alarm_type_created_time ON alarm(tenant_id, type, created_time DESC);

28
application/src/main/data/upgrade/3.1.1/schema_update_after.sql

@@ -1,28 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Finalize the profile migration: the one-shot upgrade procedures are no
-- longer needed, and the tenant/device profile references become mandatory.
DROP PROCEDURE IF EXISTS update_tenant_profiles;
DROP PROCEDURE IF EXISTS update_device_profiles;
-- Every tenant must now reference a tenant profile.
ALTER TABLE tenant ALTER COLUMN tenant_profile_id SET NOT NULL;
ALTER TABLE tenant DROP CONSTRAINT IF EXISTS fk_tenant_profile;
ALTER TABLE tenant ADD CONSTRAINT fk_tenant_profile FOREIGN KEY (tenant_profile_id) REFERENCES tenant_profile(id);
-- Isolation flags now live on tenant_profile; drop the per-tenant copies.
ALTER TABLE tenant DROP COLUMN IF EXISTS isolated_tb_core;
ALTER TABLE tenant DROP COLUMN IF EXISTS isolated_tb_rule_engine;
-- Every device must now reference a device profile.
ALTER TABLE device ALTER COLUMN device_profile_id SET NOT NULL;
ALTER TABLE device DROP CONSTRAINT IF EXISTS fk_device_profile;
ALTER TABLE device ADD CONSTRAINT fk_device_profile FOREIGN KEY (device_profile_id) REFERENCES device_profile(id);

154
application/src/main/data/upgrade/3.1.1/schema_update_before.sql

@@ -1,154 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- OAuth2 client registration settings: provider endpoints, client
-- credentials, login-button appearance, and the mapping rules used to turn
-- an external identity into a platform user.
CREATE TABLE IF NOT EXISTS oauth2_client_registration_info (
    id uuid NOT NULL CONSTRAINT oauth2_client_registration_info_pkey PRIMARY KEY,
    enabled boolean,
    created_time bigint NOT NULL,
    additional_info varchar,
    -- Standard OAuth2 provider configuration.
    client_id varchar(255),
    client_secret varchar(255),
    authorization_uri varchar(255),
    token_uri varchar(255),
    scope varchar(255),
    user_info_uri varchar(255),
    user_name_attribute_name varchar(255),
    jwk_set_uri varchar(255),
    client_authentication_method varchar(255),
    -- Login-button presentation.
    login_button_label varchar(255),
    login_button_icon varchar(255),
    allow_user_creation boolean,
    activate_user boolean,
    type varchar(31),
    -- "basic" mapper: which external attributes map to user/tenant/customer.
    basic_email_attribute_key varchar(31),
    basic_first_name_attribute_key varchar(31),
    basic_last_name_attribute_key varchar(31),
    basic_tenant_name_strategy varchar(31),
    basic_tenant_name_pattern varchar(255),
    basic_customer_name_pattern varchar(255),
    basic_default_dashboard_name varchar(255),
    basic_always_full_screen boolean,
    -- "custom" mapper: external endpoint invoked to resolve the user.
    custom_url varchar(255),
    custom_username varchar(255),
    custom_password varchar(255),
    custom_send_token boolean
);
-- Binds an oauth2_client_registration_info record to a domain
-- (name + scheme) on which that registration is active.
CREATE TABLE IF NOT EXISTS oauth2_client_registration (
    id uuid NOT NULL CONSTRAINT oauth2_client_registration_pkey PRIMARY KEY,
    created_time bigint NOT NULL,
    domain_name varchar(255),
    domain_scheme varchar(31),
    client_registration_info_id uuid
);
-- Per-provider templates (one per provider_id) used to pre-fill new OAuth2
-- client registrations with that provider's endpoints and mapper defaults.
CREATE TABLE IF NOT EXISTS oauth2_client_registration_template (
    id uuid NOT NULL CONSTRAINT oauth2_client_registration_template_pkey PRIMARY KEY,
    created_time bigint NOT NULL,
    additional_info varchar,
    provider_id varchar(255),
    -- Default provider endpoints.
    authorization_uri varchar(255),
    token_uri varchar(255),
    scope varchar(255),
    user_info_uri varchar(255),
    user_name_attribute_name varchar(255),
    jwk_set_uri varchar(255),
    client_authentication_method varchar(255),
    type varchar(31),
    -- Default "basic" mapper settings.
    basic_email_attribute_key varchar(31),
    basic_first_name_attribute_key varchar(31),
    basic_last_name_attribute_key varchar(31),
    basic_tenant_name_strategy varchar(31),
    basic_tenant_name_pattern varchar(255),
    basic_customer_name_pattern varchar(255),
    basic_default_dashboard_name varchar(255),
    basic_always_full_screen boolean,
    comment varchar,
    login_button_icon varchar(255),
    login_button_label varchar(255),
    help_link varchar(255),
    CONSTRAINT oauth2_template_provider_id_unq_key UNIQUE (provider_id)
);
-- Device profile: shared per-tenant configuration (transport, provisioning,
-- default rule chain/queue) referenced by devices. Profile name is unique
-- per tenant; the provisioning key is globally unique.
CREATE TABLE IF NOT EXISTS device_profile (
    id uuid NOT NULL CONSTRAINT device_profile_pkey PRIMARY KEY,
    created_time bigint NOT NULL,
    name varchar(255),
    type varchar(255),
    transport_type varchar(255),
    provision_type varchar(255),
    profile_data jsonb,
    description varchar,
    search_text varchar(255),
    is_default boolean,
    tenant_id uuid,
    default_rule_chain_id uuid,
    default_queue_name varchar(255),
    provision_device_key varchar,
    CONSTRAINT device_profile_name_unq_key UNIQUE (tenant_id, name),
    CONSTRAINT device_provision_key_unq_key UNIQUE (provision_device_key),
    CONSTRAINT fk_default_rule_chain_device_profile FOREIGN KEY (default_rule_chain_id) REFERENCES rule_chain(id)
);
-- Tenant profile: shared tenant configuration, including the core/rule-engine
-- isolation flags formerly stored on the tenant itself. Names are unique.
CREATE TABLE IF NOT EXISTS tenant_profile (
    id uuid NOT NULL CONSTRAINT tenant_profile_pkey PRIMARY KEY,
    created_time bigint NOT NULL,
    name varchar(255),
    profile_data jsonb,
    description varchar,
    search_text varchar(255),
    is_default boolean,
    isolated_tb_core boolean,
    isolated_tb_rule_engine boolean,
    CONSTRAINT tenant_profile_name_unq_key UNIQUE (name)
);
-- Assigns each not-yet-migrated tenant (tenant_profile_id IS NULL) the
-- tenant profile whose isolation flags match the tenant's own. A single
-- UPDATE joining on both flags replaces the original four per-combination
-- UPDATEs (false/false, true/false, false/true, true/true) with identical
-- effect: tenants or profiles with NULL flags are skipped either way.
CREATE OR REPLACE PROCEDURE update_tenant_profiles()
    LANGUAGE plpgsql AS
$$
BEGIN
    UPDATE tenant as t SET tenant_profile_id = p.id
    FROM tenant_profile as p
    WHERE t.tenant_profile_id IS NULL
      AND t.isolated_tb_core = p.isolated_tb_core
      AND t.isolated_tb_rule_engine = p.isolated_tb_rule_engine;
END;
$$;
-- Assigns a device profile to every device that has none yet, matching the
-- profile by tenant and by the device's legacy free-text "type" equaling the
-- profile name, and seeds device_data with the default configuration payload.
CREATE OR REPLACE PROCEDURE update_device_profiles()
LANGUAGE plpgsql AS
$$
BEGIN
    UPDATE device as d SET device_profile_id = p.id, device_data = '{"configuration":{"type":"DEFAULT"}, "transportConfiguration":{"type":"DEFAULT"}}'
    FROM
        (SELECT id, tenant_id, name from device_profile) as p
    WHERE d.device_profile_id IS NULL AND p.tenant_id = d.tenant_id AND d.type = p.name;
END;
$$;

23
application/src/main/data/upgrade/3.2.1/schema_update.sql

@@ -1,23 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Widget types and bundles gain an embedded preview image and a short
-- description; IF NOT EXISTS keeps the statements idempotent on re-run.
ALTER TABLE widget_type
    ADD COLUMN IF NOT EXISTS image varchar (1000000),
    ADD COLUMN IF NOT EXISTS description varchar (255);
ALTER TABLE widgets_bundle
    ADD COLUMN IF NOT EXISTS image varchar (1000000),
    ADD COLUMN IF NOT EXISTS description varchar (255);

87
application/src/main/data/upgrade/3.2.1/schema_update_ttl.sql

@@ -1,87 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Removes expired time-series rows for every tenant and customer.
-- TTLs (seconds) come from the 'TTL' attribute stored in attribute_kv for the
-- tenant or customer entity; a tenant without one falls back to system_ttl,
-- and a TTL <= 0 disables cleanup for that scope. 'deleted' accumulates the
-- total number of removed rows. Relies on helper functions
-- delete_device_records_from_ts_kv / delete_asset_records_from_ts_kv /
-- delete_customer_records_from_ts_kv defined elsewhere in the upgrade scripts.
CREATE OR REPLACE PROCEDURE cleanup_timeseries_by_ttl(IN null_uuid uuid,
                                                      IN system_ttl bigint, INOUT deleted bigint)
    LANGUAGE plpgsql AS
$$
DECLARE
    tenant_cursor CURSOR FOR select tenant.id as tenant_id
                             from tenant;
    tenant_id_record     uuid;
    customer_id_record   uuid;
    tenant_ttl           bigint;
    customer_ttl         bigint;
    deleted_for_entities bigint;
    tenant_ttl_ts        bigint;
    customer_ttl_ts      bigint;
BEGIN
    OPEN tenant_cursor;
    FETCH tenant_cursor INTO tenant_id_record;
    WHILE FOUND
        LOOP
            -- BUGFIX: reset the tenant cutoff each iteration. Previously a
            -- cutoff computed for an earlier tenant leaked into this one when
            -- the current tenant's TTL was <= 0, so customers inheriting
            -- tenant_ttl_ts below could be cleaned against a stale timestamp.
            tenant_ttl_ts := NULL;
            EXECUTE format(
                    'select attribute_kv.long_v from attribute_kv where attribute_kv.entity_id = %L and attribute_kv.attribute_key = %L',
                    tenant_id_record, 'TTL') INTO tenant_ttl;
            if tenant_ttl IS NULL THEN
                tenant_ttl := system_ttl;
            END IF;
            IF tenant_ttl > 0 THEN
                -- Cutoff in epoch milliseconds, matching the ts column of the telemetry tables.
                tenant_ttl_ts := (EXTRACT(EPOCH FROM current_timestamp) * 1000 - tenant_ttl::bigint * 1000)::bigint;
                deleted_for_entities := delete_device_records_from_ts_kv(tenant_id_record, null_uuid, tenant_ttl_ts);
                deleted := deleted + deleted_for_entities;
                RAISE NOTICE '% telemetry removed for devices where tenant_id = %', deleted_for_entities, tenant_id_record;
                deleted_for_entities := delete_asset_records_from_ts_kv(tenant_id_record, null_uuid, tenant_ttl_ts);
                deleted := deleted + deleted_for_entities;
                RAISE NOTICE '% telemetry removed for assets where tenant_id = %', deleted_for_entities, tenant_id_record;
            END IF;
            FOR customer_id_record IN
                SELECT customer.id AS customer_id FROM customer WHERE customer.tenant_id = tenant_id_record
                LOOP
                    -- BUGFIX: reset the per-customer cutoff; without this, a
                    -- customer with TTL = 0 (cleanup disabled) silently reused
                    -- the cutoff of the previously processed customer.
                    customer_ttl_ts := NULL;
                    EXECUTE format(
                            'select attribute_kv.long_v from attribute_kv where attribute_kv.entity_id = %L and attribute_kv.attribute_key = %L',
                            customer_id_record, 'TTL') INTO customer_ttl;
                    IF customer_ttl IS NULL THEN
                        -- No customer-level TTL: inherit the tenant cutoff (may be NULL).
                        customer_ttl_ts := tenant_ttl_ts;
                    ELSE
                        IF customer_ttl > 0 THEN
                            customer_ttl_ts :=
                                    (EXTRACT(EPOCH FROM current_timestamp) * 1000 -
                                     customer_ttl::bigint * 1000)::bigint;
                        END IF;
                    END IF;
                    IF customer_ttl_ts IS NOT NULL AND customer_ttl_ts > 0 THEN
                        deleted_for_entities :=
                                delete_customer_records_from_ts_kv(tenant_id_record, customer_id_record,
                                                                   customer_ttl_ts);
                        deleted := deleted + deleted_for_entities;
                        RAISE NOTICE '% telemetry removed for customer with id = % where tenant_id = %', deleted_for_entities, customer_id_record, tenant_id_record;
                        deleted_for_entities :=
                                delete_device_records_from_ts_kv(tenant_id_record, customer_id_record,
                                                                 customer_ttl_ts);
                        deleted := deleted + deleted_for_entities;
                        RAISE NOTICE '% telemetry removed for devices where tenant_id = % and customer_id = %', deleted_for_entities, tenant_id_record, customer_id_record;
                        deleted_for_entities := delete_asset_records_from_ts_kv(tenant_id_record,
                                                                                customer_id_record,
                                                                                customer_ttl_ts);
                        deleted := deleted + deleted_for_entities;
                        RAISE NOTICE '% telemetry removed for assets where tenant_id = % and customer_id = %', deleted_for_entities, tenant_id_record, customer_id_record;
                    END IF;
                END LOOP;
            FETCH tenant_cursor INTO tenant_id_record;
        END LOOP;
    -- Release the cursor explicitly rather than waiting for transaction end.
    CLOSE tenant_cursor;
END
$$;

216
application/src/main/data/upgrade/3.2.2/schema_update.sql

@ -1,216 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Edge instances registered per tenant. routing_key/secret are the
-- credentials an edge uses to connect to the platform; both are unique.
CREATE TABLE IF NOT EXISTS edge (
id uuid NOT NULL CONSTRAINT edge_pkey PRIMARY KEY,
created_time bigint NOT NULL,
additional_info varchar,
customer_id uuid,
root_rule_chain_id uuid,
type varchar(255),
name varchar(255),
label varchar(255),
routing_key varchar(255),
secret varchar(255),
edge_license_key varchar(30),
cloud_endpoint varchar(255),
search_text varchar(255),
tenant_id uuid,
CONSTRAINT edge_name_unq_key UNIQUE (tenant_id, name),
CONSTRAINT edge_routing_key_unq_key UNIQUE (routing_key)
);
-- Queue of entity changes to be pushed to an edge; body holds the serialized
-- payload (up to ~10 MB), ts is the event timestamp in epoch millis.
CREATE TABLE IF NOT EXISTS edge_event (
id uuid NOT NULL CONSTRAINT edge_event_pkey PRIMARY KEY,
created_time bigint NOT NULL,
edge_id uuid,
edge_event_type varchar(255),
edge_event_uid varchar(255),
entity_id uuid,
edge_event_action varchar(255),
body varchar(10000000),
tenant_id uuid,
ts bigint NOT NULL
);
-- Tenant-scoped resources (e.g. LwM2M models); data is stored inline and a
-- resource is addressed by (tenant_id, resource_type, resource_key).
CREATE TABLE IF NOT EXISTS resource (
id uuid NOT NULL CONSTRAINT resource_pkey PRIMARY KEY,
created_time bigint NOT NULL,
tenant_id uuid NOT NULL,
title varchar(255) NOT NULL,
resource_type varchar(32) NOT NULL,
resource_key varchar(255) NOT NULL,
search_text varchar(255),
file_name varchar(255) NOT NULL,
data varchar,
CONSTRAINT resource_unq_key UNIQUE (tenant_id, resource_type, resource_key)
);
-- Firmware/software packages. A package either embeds its binary (data is a
-- large object oid + data_size/checksum) or points to an external url.
CREATE TABLE IF NOT EXISTS ota_package (
id uuid NOT NULL CONSTRAINT ota_package_pkey PRIMARY KEY,
created_time bigint NOT NULL,
tenant_id uuid NOT NULL,
device_profile_id uuid,
type varchar(32) NOT NULL,
title varchar(255) NOT NULL,
version varchar(255) NOT NULL,
tag varchar(255),
url varchar(255),
file_name varchar(255),
content_type varchar(255),
checksum_algorithm varchar(32),
checksum varchar(1020),
data oid,
data_size bigint,
additional_info varchar,
search_text varchar(255),
CONSTRAINT ota_package_tenant_title_version_unq_key UNIQUE (tenant_id, title, version)
);
-- Root record of an OAuth2 configuration; the tables below hang off it
-- via oauth2_params_id with ON DELETE CASCADE.
CREATE TABLE IF NOT EXISTS oauth2_params (
id uuid NOT NULL CONSTRAINT oauth2_params_pkey PRIMARY KEY,
enabled boolean,
tenant_id uuid,
created_time bigint NOT NULL
);
-- One OAuth2 provider registration (client credentials, endpoints, mapper
-- settings). 'type' selects the mapper; basic_* / custom_* columns hold the
-- settings for the corresponding mapper kind.
CREATE TABLE IF NOT EXISTS oauth2_registration (
id uuid NOT NULL CONSTRAINT oauth2_registration_pkey PRIMARY KEY,
oauth2_params_id uuid NOT NULL,
created_time bigint NOT NULL,
additional_info varchar,
client_id varchar(255),
client_secret varchar(2048),
authorization_uri varchar(255),
token_uri varchar(255),
scope varchar(255),
platforms varchar(255),
user_info_uri varchar(255),
user_name_attribute_name varchar(255),
jwk_set_uri varchar(255),
client_authentication_method varchar(255),
login_button_label varchar(255),
login_button_icon varchar(255),
allow_user_creation boolean,
activate_user boolean,
type varchar(31),
basic_email_attribute_key varchar(31),
basic_first_name_attribute_key varchar(31),
basic_last_name_attribute_key varchar(31),
basic_tenant_name_strategy varchar(31),
basic_tenant_name_pattern varchar(255),
basic_customer_name_pattern varchar(255),
basic_default_dashboard_name varchar(255),
basic_always_full_screen boolean,
custom_url varchar(255),
custom_username varchar(255),
custom_password varchar(255),
custom_send_token boolean,
CONSTRAINT fk_registration_oauth2_params FOREIGN KEY (oauth2_params_id) REFERENCES oauth2_params(id) ON DELETE CASCADE
);
-- Domains (scheme + name) for which an OAuth2 configuration is active.
CREATE TABLE IF NOT EXISTS oauth2_domain (
id uuid NOT NULL CONSTRAINT oauth2_domain_pkey PRIMARY KEY,
oauth2_params_id uuid NOT NULL,
created_time bigint NOT NULL,
domain_name varchar(255),
domain_scheme varchar(31),
CONSTRAINT fk_domain_oauth2_params FOREIGN KEY (oauth2_params_id) REFERENCES oauth2_params(id) ON DELETE CASCADE,
CONSTRAINT oauth2_domain_unq_key UNIQUE (oauth2_params_id, domain_name, domain_scheme)
);
-- Mobile application packages allowed to use an OAuth2 configuration.
CREATE TABLE IF NOT EXISTS oauth2_mobile (
id uuid NOT NULL CONSTRAINT oauth2_mobile_pkey PRIMARY KEY,
oauth2_params_id uuid NOT NULL,
created_time bigint NOT NULL,
pkg_name varchar(255),
app_secret varchar(2048),
CONSTRAINT fk_mobile_oauth2_params FOREIGN KEY (oauth2_params_id) REFERENCES oauth2_params(id) ON DELETE CASCADE,
CONSTRAINT oauth2_mobile_unq_key UNIQUE (oauth2_params_id, pkg_name)
);
-- Dashboard gains an inline preview image plus mobile-app visibility/ordering.
ALTER TABLE dashboard
ADD COLUMN IF NOT EXISTS image varchar(1000000),
ADD COLUMN IF NOT EXISTS mobile_hide boolean DEFAULT false,
ADD COLUMN IF NOT EXISTS mobile_order int;
-- Device profile gains an image, default firmware/software package refs and
-- a default dashboard (FKs for these are added further below).
ALTER TABLE device_profile
ADD COLUMN IF NOT EXISTS image varchar(1000000),
ADD COLUMN IF NOT EXISTS firmware_id uuid,
ADD COLUMN IF NOT EXISTS software_id uuid,
ADD COLUMN IF NOT EXISTS default_dashboard_id uuid;
-- Per-device firmware/software assignment overriding the profile defaults.
ALTER TABLE device
ADD COLUMN IF NOT EXISTS firmware_id uuid,
ADD COLUMN IF NOT EXISTS software_id uuid;
ALTER TABLE alarm
ADD COLUMN IF NOT EXISTS customer_id uuid;
-- Tenant->rule-chain relations are no longer used for chain lookup.
DELETE FROM relation WHERE from_type = 'TENANT' AND relation_type_group = 'RULE_CHAIN';
-- Idempotently add FK constraints for the OTA-package and dashboard columns
-- created above; pg_constraint is probed by name so re-running is safe.
DO $$
    BEGIN
        IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'fk_firmware_device_profile') THEN
            ALTER TABLE device_profile
                ADD CONSTRAINT fk_firmware_device_profile
                    FOREIGN KEY (firmware_id) REFERENCES ota_package(id);
        END IF;
        IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'fk_software_device_profile') THEN
            -- BUGFIX: this constraint previously referenced firmware_id (copy-paste
            -- error), leaving device_profile.software_id with no referential integrity.
            ALTER TABLE device_profile
                ADD CONSTRAINT fk_software_device_profile
                    FOREIGN KEY (software_id) REFERENCES ota_package(id);
        END IF;
        IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'fk_default_dashboard_device_profile') THEN
            ALTER TABLE device_profile
                ADD CONSTRAINT fk_default_dashboard_device_profile
                    FOREIGN KEY (default_dashboard_id) REFERENCES dashboard(id);
        END IF;
        IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'fk_firmware_device') THEN
            ALTER TABLE device
                ADD CONSTRAINT fk_firmware_device
                    FOREIGN KEY (firmware_id) REFERENCES ota_package(id);
        END IF;
        IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'fk_software_device') THEN
            -- BUGFIX: likewise referenced firmware_id instead of software_id.
            ALTER TABLE device
                ADD CONSTRAINT fk_software_device
                    FOREIGN KEY (software_id) REFERENCES ota_package(id);
        END IF;
    END;
$$;
-- Track alarm-execution feature state in API usage; existing rows default
-- to ENABLED so current tenants are unaffected.
ALTER TABLE api_usage_state
ADD COLUMN IF NOT EXISTS alarm_exec VARCHAR(32);
UPDATE api_usage_state SET alarm_exec = 'ENABLED' WHERE alarm_exec IS NULL;
-- Persistent RPC calls to devices; request/response/additional_info hold
-- serialized JSON payloads (up to ~10 MB each).
CREATE TABLE IF NOT EXISTS rpc (
id uuid NOT NULL CONSTRAINT rpc_pkey PRIMARY KEY,
created_time bigint NOT NULL,
tenant_id uuid NOT NULL,
device_id uuid NOT NULL,
expiration_time bigint NOT NULL,
request varchar(10000000) NOT NULL,
response varchar(10000000),
additional_info varchar(10000000),
status varchar(255) NOT NULL
);
-- RPCs are always queried per tenant + device.
CREATE INDEX IF NOT EXISTS idx_rpc_tenant_id_device_id ON rpc(tenant_id, device_id);

90
application/src/main/data/upgrade/3.2.2/schema_update_event.sql

@ -1,90 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- PROCEDURE: public.cleanup_events_by_ttl(bigint, bigint, bigint)
-- Drop the old definition first: CREATE OR REPLACE cannot alter the
-- parameter list of an existing procedure.
DROP PROCEDURE IF EXISTS public.cleanup_events_by_ttl(bigint, bigint, bigint);
-- Removes events older than their TTL (seconds). Regular events are governed
-- by 'ttl'; debug events (rule node/chain, converter, integration) by
-- 'debug_ttl'. A TTL <= 0 disables the corresponding pass. 'deleted' returns
-- the combined number of removed rows.
CREATE OR REPLACE PROCEDURE public.cleanup_events_by_ttl(
ttl bigint,
debug_ttl bigint,
INOUT deleted bigint)
LANGUAGE 'plpgsql'
AS $BODY$
DECLARE
ttl_ts bigint;
debug_ttl_ts bigint;
ttl_deleted_count bigint DEFAULT 0;
debug_ttl_deleted_count bigint DEFAULT 0;
BEGIN
IF ttl > 0 THEN
-- Cutoff in epoch milliseconds, matching event.ts.
ttl_ts := (EXTRACT(EPOCH FROM current_timestamp) * 1000 - ttl::bigint * 1000)::bigint;
DELETE FROM event
WHERE ts < ttl_ts
AND NOT event_type IN ('DEBUG_RULE_NODE', 'DEBUG_RULE_CHAIN', 'DEBUG_CONVERTER', 'DEBUG_INTEGRATION');
GET DIAGNOSTICS ttl_deleted_count = ROW_COUNT;
END IF;
IF debug_ttl > 0 THEN
debug_ttl_ts := (EXTRACT(EPOCH FROM current_timestamp) * 1000 - debug_ttl::bigint * 1000)::bigint;
DELETE FROM event
WHERE ts < debug_ttl_ts
AND event_type IN ('DEBUG_RULE_NODE', 'DEBUG_RULE_CHAIN', 'DEBUG_CONVERTER', 'DEBUG_INTEGRATION');
GET DIAGNOSTICS debug_ttl_deleted_count = ROW_COUNT;
END IF;
RAISE NOTICE 'Events removed by ttl: %', ttl_deleted_count;
RAISE NOTICE 'Debug Events removed by ttl: %', debug_ttl_deleted_count;
deleted := ttl_deleted_count + debug_ttl_deleted_count;
END
$BODY$;
-- Index: idx_event_ts — rebuilt to support the TTL deletes above.
DROP INDEX IF EXISTS public.idx_event_ts;
-- Hint: add CONCURRENTLY to CREATE INDEX query in case of more then 1 million records or during live update
-- CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_event_ts
CREATE INDEX IF NOT EXISTS idx_event_ts
ON public.event
(ts DESC NULLS LAST)
WITH (FILLFACTOR=95);
COMMENT ON INDEX public.idx_event_ts
IS 'This index helps to delete events by TTL using timestamp';
-- Index: idx_event_tenant_entity_type_entity_event_type_created_time_des
DROP INDEX IF EXISTS public.idx_event_tenant_entity_type_entity_event_type_created_time_des;
-- CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_event_tenant_entity_type_entity_event_type_created_time_des
CREATE INDEX IF NOT EXISTS idx_event_tenant_entity_type_entity_event_type_created_time_des
ON public.event
(tenant_id ASC, entity_type ASC, entity_id ASC, event_type ASC, created_time DESC NULLS LAST)
WITH (FILLFACTOR=95);
COMMENT ON INDEX public.idx_event_tenant_entity_type_entity_event_type_created_time_des
IS 'This index helps to open latest events on UI fast';
-- Index: idx_event_type_entity_id
-- Description: replaced with more suitable idx_event_tenant_entity_type_entity_event_type_created_time_des
DROP INDEX IF EXISTS public.idx_event_type_entity_id;

32
application/src/main/data/upgrade/3.2.2/schema_update_ttl.sql

@ -1,32 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Deletes edge_event rows older than now - ttl (ttl in seconds).
-- A ttl <= 0 disables the cleanup entirely. 'deleted' returns the number
-- of rows that were removed.
CREATE OR REPLACE PROCEDURE cleanup_edge_events_by_ttl(IN ttl bigint, INOUT deleted bigint)
    LANGUAGE plpgsql AS
$$
DECLARE
    expiration_ts bigint;
    removed_count bigint DEFAULT 0;
BEGIN
    IF ttl > 0 THEN
        -- Cutoff in epoch milliseconds, matching edge_event.ts.
        expiration_ts := (EXTRACT(EPOCH FROM current_timestamp) * 1000 - ttl::bigint * 1000)::bigint;
        EXECUTE format(
                'WITH deleted AS (DELETE FROM edge_event WHERE ts < %L::bigint RETURNING *) SELECT count(*) FROM deleted',
                expiration_ts) INTO removed_count;
    END IF;
    RAISE NOTICE 'Edge events removed by ttl: %', removed_count;
    deleted := removed_count;
END
$$;

71
application/src/main/data/upgrade/3.3.2/schema_update.sql

@ -1,71 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Link table between alarms and the entities they apply/propagate to;
-- one row per (entity, alarm) pair. Rows die with their alarm (CASCADE).
CREATE TABLE IF NOT EXISTS entity_alarm (
tenant_id uuid NOT NULL,
entity_type varchar(32),
entity_id uuid NOT NULL,
created_time bigint NOT NULL,
alarm_type varchar(255) NOT NULL,
customer_id uuid,
alarm_id uuid,
CONSTRAINT entity_alarm_pkey PRIMARY KEY (entity_id, alarm_id),
CONSTRAINT fk_entity_alarm_id FOREIGN KEY (alarm_id) REFERENCES alarm(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_alarm_tenant_status_created_time ON alarm(tenant_id, status, created_time DESC);
CREATE INDEX IF NOT EXISTS idx_entity_alarm_created_time ON entity_alarm(tenant_id, entity_id, created_time DESC);
CREATE INDEX IF NOT EXISTS idx_entity_alarm_alarm_id ON entity_alarm(alarm_id);
-- Backfill: every existing alarm gets an entity_alarm row for its originator.
-- The numeric originator_type is mapped to the EntityType enum name by its
-- ordinal value; unknown ordinals degrade to 'UNKNOWN'.
INSERT INTO entity_alarm(tenant_id, entity_type, entity_id, created_time, alarm_type, customer_id, alarm_id)
SELECT tenant_id,
CASE
WHEN originator_type = 0 THEN 'TENANT'
WHEN originator_type = 1 THEN 'CUSTOMER'
WHEN originator_type = 2 THEN 'USER'
WHEN originator_type = 3 THEN 'DASHBOARD'
WHEN originator_type = 4 THEN 'ASSET'
WHEN originator_type = 5 THEN 'DEVICE'
WHEN originator_type = 6 THEN 'ALARM'
WHEN originator_type = 7 THEN 'RULE_CHAIN'
WHEN originator_type = 8 THEN 'RULE_NODE'
WHEN originator_type = 9 THEN 'ENTITY_VIEW'
WHEN originator_type = 10 THEN 'WIDGETS_BUNDLE'
WHEN originator_type = 11 THEN 'WIDGET_TYPE'
WHEN originator_type = 12 THEN 'TENANT_PROFILE'
WHEN originator_type = 13 THEN 'DEVICE_PROFILE'
WHEN originator_type = 14 THEN 'API_USAGE_STATE'
WHEN originator_type = 15 THEN 'TB_RESOURCE'
WHEN originator_type = 16 THEN 'OTA_PACKAGE'
WHEN originator_type = 17 THEN 'EDGE'
WHEN originator_type = 18 THEN 'RPC'
else 'UNKNOWN'
END,
originator_id,
created_time,
type,
customer_id,
id
FROM alarm
ON CONFLICT DO NOTHING;
-- Backfill propagation targets previously modeled as 'ANY' relations in the
-- ALARM relation group, then drop those relations (now fully replaced).
INSERT INTO entity_alarm(tenant_id, entity_type, entity_id, created_time, alarm_type, customer_id, alarm_id)
SELECT a.tenant_id, r.from_type, r.from_id, created_time, type, customer_id, id
FROM alarm a
INNER JOIN relation r ON r.relation_type_group = 'ALARM' and r.relation_type = 'ANY' and a.id = r.to_id
ON CONFLICT DO NOTHING;
DELETE FROM relation r WHERE r.relation_type_group = 'ALARM';

213
application/src/main/data/upgrade/3.3.2/schema_update_lwm2m_bootstrap.sql

@ -1,213 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Rewrites the LwM2M bootstrap section of every LWM2M device profile:
-- converts hex-encoded bootstrap/LwM2M server public keys to base64 and
-- replaces the old transportConfiguration layout with the array form
-- produced by get_bootstrap() below. Profiles whose decoded keys come out
-- NULL are skipped (see WHERE clause).
CREATE OR REPLACE PROCEDURE update_profile_bootstrap()
LANGUAGE plpgsql AS
$$
BEGIN
UPDATE device_profile
SET profile_data = jsonb_set(
profile_data,
'{transportConfiguration}',
get_bootstrap(
profile_data::jsonb #> '{transportConfiguration}',
subquery.publickey_bs,
subquery.publickey_lw,
profile_data::json #>> '{transportConfiguration, bootstrap, bootstrapServer, securityMode}',
profile_data::json #>> '{transportConfiguration, bootstrap, lwm2mServer, securityMode}'),
true)
FROM (
-- Decode the hex key material once per profile and re-encode as base64.
SELECT id,
encode(
decode(profile_data::json #> '{transportConfiguration,bootstrap,bootstrapServer}' ->>
'serverPublicKey', 'hex')::bytea, 'base64') AS publickey_bs,
encode(
decode(profile_data::json #> '{transportConfiguration,bootstrap,lwm2mServer}' ->>
'serverPublicKey', 'hex')::bytea, 'base64') AS publickey_lw
FROM device_profile
WHERE transport_type = 'LWM2M'
) AS subquery
WHERE device_profile.id = subquery.id
AND subquery.publickey_bs IS NOT NULL
AND subquery.publickey_lw IS NOT NULL;
END;
$$;
-- Builds the new-format 'bootstrap' array (one element for the bootstrap
-- server, one for the LwM2M server) from the legacy
-- {bootstrapServer, lwm2mServer, servers} object, substituting the already
-- base64-encoded public keys, and returns the transport configuration with
-- the array spliced in plus bootstrapServerUpdateEnable=true.
-- Missing security modes default to 'NO_SEC'.
CREATE OR REPLACE FUNCTION get_bootstrap(transport_configuration_in jsonb, publickey_bs text,
publickey_lw text, security_mode_bs text,
security_mode_lw text) RETURNS jsonb AS
$$
DECLARE
bootstrap_new jsonb;
bootstrap_in jsonb;
BEGIN
IF security_mode_lw IS NULL THEN
security_mode_lw := 'NO_SEC';
END IF;
IF security_mode_bs IS NULL THEN
security_mode_bs := 'NO_SEC';
END IF;
bootstrap_in := transport_configuration_in::jsonb #> '{bootstrap}';
-- Element 1: the bootstrap server (bootstrapServerIs = true).
-- Common fields (binding/lifetime/periods) come from the shared 'servers' object.
bootstrap_new := json_build_array(
json_build_object('shortServerId', bootstrap_in::json #> '{bootstrapServer}' -> 'serverId',
'securityMode', security_mode_bs,
'binding', bootstrap_in::json #> '{servers}' ->> 'binding',
'lifetime', bootstrap_in::json #> '{servers}' -> 'lifetime',
'notifIfDisabled', bootstrap_in::json #> '{servers}' -> 'notifIfDisabled',
'defaultMinPeriod', bootstrap_in::json #> '{servers}' -> 'defaultMinPeriod',
'host', bootstrap_in::json #> '{bootstrapServer}' ->> 'host',
'port', bootstrap_in::json #> '{bootstrapServer}' -> 'port',
'serverPublicKey', publickey_bs,
'bootstrapServerIs', true,
'clientHoldOffTime', bootstrap_in::json #> '{bootstrapServer}' -> 'clientHoldOffTime',
'bootstrapServerAccountTimeout',
bootstrap_in::json #> '{bootstrapServer}' -> 'bootstrapServerAccountTimeout'
),
-- Element 2: the regular LwM2M server (bootstrapServerIs = false).
json_build_object('shortServerId', bootstrap_in::json #> '{lwm2mServer}' -> 'serverId',
'securityMode', security_mode_lw,
'binding', bootstrap_in::json #> '{servers}' ->> 'binding',
'lifetime', bootstrap_in::json #> '{servers}' -> 'lifetime',
'notifIfDisabled', bootstrap_in::json #> '{servers}' -> 'notifIfDisabled',
'defaultMinPeriod', bootstrap_in::json #> '{servers}' -> 'defaultMinPeriod',
'host', bootstrap_in::json #> '{lwm2mServer}' ->> 'host',
'port', bootstrap_in::json #> '{lwm2mServer}' -> 'port',
'serverPublicKey', publickey_lw,
'bootstrapServerIs', false,
'clientHoldOffTime', bootstrap_in::json #> '{lwm2mServer}' -> 'clientHoldOffTime',
'bootstrapServerAccountTimeout',
bootstrap_in::json #> '{lwm2mServer}' -> 'bootstrapServerAccountTimeout'
)
);
RETURN jsonb_set(
transport_configuration_in,
'{bootstrap}',
bootstrap_new,
true) || '{"bootstrapServerUpdateEnable": true}';
END;
$$ LANGUAGE plpgsql;
-- Rewrites every LwM2M device-credentials record via
-- get_device_and_bootstrap(), converting hex-encoded key material to base64.
CREATE OR REPLACE PROCEDURE update_device_credentials_to_base64_and_bootstrap()
LANGUAGE plpgsql AS
$$
BEGIN
UPDATE device_credentials
SET credentials_value = get_device_and_bootstrap(credentials_value::text)
WHERE credentials_type = 'LWM2M_CREDENTIALS';
END;
$$;
-- Converts hex-encoded LwM2M key material inside a device-credentials JSON
-- document to base64 and returns the updated document. Handles four spots
-- independently: client RPK key, client X509 cert, and the lwm2mServer /
-- bootstrapServer sections under 'bootstrap'. The regex guard
-- ~ '^[0-9a-fA-F]+$' ensures only hex-looking values are re-encoded, so
-- values that are already base64 pass through unchanged (idempotent).
CREATE OR REPLACE FUNCTION get_device_and_bootstrap(IN credentials_value text, OUT credentials_value_new text)
LANGUAGE plpgsql AS
$$
DECLARE
client_secret_key text;
client_public_key_or_id text;
client_key_value_object jsonb;
client_bootstrap_server_value_object jsonb;
client_bootstrap_server_object jsonb;
client_bootstrap_object jsonb;
BEGIN
credentials_value_new := credentials_value;
-- Case 1: RPK client — re-encode the raw public key under 'key'.
IF credentials_value::jsonb #> '{client}' ->> 'securityConfigClientMode' = 'RPK' AND
NULLIF((credentials_value::jsonb #> '{client}' ->> 'key' ~ '^[0-9a-fA-F]+$')::text, 'false') = 'true' THEN
client_public_key_or_id := encode(decode(credentials_value::jsonb #> '{client}' ->> 'key', 'hex')::bytea, 'base64');
client_key_value_object := json_build_object(
'endpoint', credentials_value::jsonb #> '{client}' ->> 'endpoint',
'securityConfigClientMode', credentials_value::jsonb #> '{client}' ->> 'securityConfigClientMode',
'key', client_public_key_or_id);
credentials_value_new :=
credentials_value_new::jsonb || json_build_object('client', client_key_value_object)::jsonb;
END IF;
-- Case 2: X509 client — re-encode the certificate under 'cert'.
IF credentials_value::jsonb #> '{client}' ->> 'securityConfigClientMode' = 'X509' AND
NULLIF((credentials_value::jsonb #> '{client}' ->> 'cert' ~ '^[0-9a-fA-F]+$')::text, 'false') = 'true' THEN
client_public_key_or_id :=
encode(decode(credentials_value::jsonb #> '{client}' ->> 'cert', 'hex')::bytea, 'base64');
client_key_value_object := json_build_object(
'endpoint', credentials_value::jsonb #> '{client}' ->> 'endpoint',
'securityConfigClientMode', credentials_value::jsonb #> '{client}' ->> 'securityConfigClientMode',
'cert', client_public_key_or_id);
credentials_value_new :=
credentials_value_new::jsonb || json_build_object('client', client_key_value_object)::jsonb;
END IF;
-- Case 3: bootstrap.lwm2mServer — re-encode clientSecretKey and
-- clientPublicKeyOrId only when both look like hex.
IF credentials_value::jsonb #> '{bootstrap,lwm2mServer}' ->> 'securityMode' = 'RPK' OR
credentials_value::jsonb #> '{bootstrap,lwm2mServer}' ->> 'securityMode' = 'X509' THEN
IF NULLIF((credentials_value::jsonb #> '{bootstrap,lwm2mServer}' ->> 'clientSecretKey' ~ '^[0-9a-fA-F]+$')::text,
'false') = 'true' AND
NULLIF(
(credentials_value::jsonb #> '{bootstrap,lwm2mServer}' ->> 'clientPublicKeyOrId' ~ '^[0-9a-fA-F]+$')::text,
'false') = 'true' THEN
client_secret_key :=
encode(decode(credentials_value::jsonb #> '{bootstrap,lwm2mServer}' ->> 'clientSecretKey', 'hex')::bytea,
'base64');
client_public_key_or_id := encode(
decode(credentials_value::jsonb #> '{bootstrap,lwm2mServer}' ->> 'clientPublicKeyOrId', 'hex')::bytea,
'base64');
client_bootstrap_server_value_object := jsonb_build_object(
'securityMode', credentials_value::jsonb #> '{bootstrap,lwm2mServer}' ->> 'securityMode',
'clientPublicKeyOrId', client_public_key_or_id,
'clientSecretKey', client_secret_key
);
client_bootstrap_server_object := jsonb_build_object('lwm2mServer', client_bootstrap_server_value_object::jsonb);
client_bootstrap_object := credentials_value_new::jsonb #> '{bootstrap}' || client_bootstrap_server_object::jsonb;
credentials_value_new :=
jsonb_set(credentials_value_new::jsonb, '{bootstrap}', client_bootstrap_object::jsonb, false)::jsonb;
END IF;
END IF;
-- Case 4: bootstrap.bootstrapServer — same transformation as case 3.
IF credentials_value::jsonb #> '{bootstrap,bootstrapServer}' ->> 'securityMode' = 'RPK' OR
credentials_value::jsonb #> '{bootstrap,bootstrapServer}' ->> 'securityMode' = 'X509' THEN
IF NULLIF(
(credentials_value::jsonb #> '{bootstrap,bootstrapServer}' ->> 'clientSecretKey' ~ '^[0-9a-fA-F]+$')::text,
'false') = 'true' AND
NULLIF(
(credentials_value::jsonb #> '{bootstrap,bootstrapServer}' ->> 'clientPublicKeyOrId' ~ '^[0-9a-fA-F]+$')::text,
'false') = 'true' THEN
client_secret_key :=
encode(
decode(credentials_value::jsonb #> '{bootstrap,bootstrapServer}' ->> 'clientSecretKey', 'hex')::bytea,
'base64');
client_public_key_or_id := encode(
decode(credentials_value::jsonb #> '{bootstrap,bootstrapServer}' ->> 'clientPublicKeyOrId', 'hex')::bytea,
'base64');
client_bootstrap_server_value_object := jsonb_build_object(
'securityMode', credentials_value::jsonb #> '{bootstrap,bootstrapServer}' ->> 'securityMode',
'clientPublicKeyOrId', client_public_key_or_id,
'clientSecretKey', client_secret_key
);
client_bootstrap_server_object :=
jsonb_build_object('bootstrapServer', client_bootstrap_server_value_object::jsonb);
client_bootstrap_object := credentials_value_new::jsonb #> '{bootstrap}' || client_bootstrap_server_object::jsonb;
credentials_value_new :=
jsonb_set(credentials_value_new::jsonb, '{bootstrap}', client_bootstrap_object::jsonb, false)::jsonb;
END IF;
END IF;
END;
$$;

50
application/src/main/data/upgrade/3.3.3/schema_event_ttl_procedure.sql

@ -1,50 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Drop the 3.2.2 signature first: CREATE OR REPLACE cannot change a
-- procedure's parameter list.
DROP PROCEDURE IF EXISTS public.cleanup_events_by_ttl(bigint, bigint, bigint);
-- Window-based event cleanup: deletes regular events with ts strictly inside
-- (regular_events_start_ts, regular_events_end_ts) and debug events
-- (DEBUG_RULE_NODE / DEBUG_RULE_CHAIN) inside the debug window. A window
-- bound <= 0 disables that pass. 'deleted' returns the combined row count.
CREATE OR REPLACE PROCEDURE cleanup_events_by_ttl(
IN regular_events_start_ts bigint,
IN regular_events_end_ts bigint,
IN debug_events_start_ts bigint,
IN debug_events_end_ts bigint,
INOUT deleted bigint)
LANGUAGE plpgsql AS
$$
DECLARE
ttl_deleted_count bigint DEFAULT 0;
debug_ttl_deleted_count bigint DEFAULT 0;
BEGIN
IF regular_events_start_ts > 0 AND regular_events_end_ts > 0 THEN
-- Dynamic SQL with %L-quoted bounds; the CTE returns the delete count.
EXECUTE format(
'WITH deleted AS (DELETE FROM event WHERE id in (SELECT id from event WHERE ts > %L::bigint AND ts < %L::bigint AND ' ||
'(event_type != %L::varchar AND event_type != %L::varchar)) RETURNING *) ' ||
'SELECT count(*) FROM deleted', regular_events_start_ts, regular_events_end_ts,
'DEBUG_RULE_NODE', 'DEBUG_RULE_CHAIN') into ttl_deleted_count;
END IF;
IF debug_events_start_ts > 0 AND debug_events_end_ts > 0 THEN
EXECUTE format(
'WITH deleted AS (DELETE FROM event WHERE id in (SELECT id from event WHERE ts > %L::bigint AND ts < %L::bigint AND ' ||
'(event_type = %L::varchar OR event_type = %L::varchar)) RETURNING *) ' ||
'SELECT count(*) FROM deleted', debug_events_start_ts, debug_events_end_ts,
'DEBUG_RULE_NODE', 'DEBUG_RULE_CHAIN') into debug_ttl_deleted_count;
END IF;
RAISE NOTICE 'Events removed by ttl: %', ttl_deleted_count;
RAISE NOTICE 'Debug Events removed by ttl: %', debug_ttl_deleted_count;
deleted := ttl_deleted_count + debug_ttl_deleted_count;
END
$$;

29
application/src/main/data/upgrade/3.3.3/schema_update.sql

@ -1,29 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Remove OTA packages pointing at device profiles that no longer exist, so
-- the FK below can be created.
-- NOTE(review): this also removes packages whose device_profile_id is NULL
-- (NOT EXISTS is true for them) — confirm that is intended.
DELETE from ota_package as op WHERE NOT EXISTS(SELECT * FROM device_profile dp where op.device_profile_id = dp.id);
-- Idempotently add the profile FK; probing pg_constraint by name makes
-- re-running this upgrade script safe.
DO
$$
BEGIN
IF NOT EXISTS(SELECT 1 FROM pg_constraint WHERE conname = 'fk_device_profile_ota_package') THEN
ALTER TABLE ota_package
ADD CONSTRAINT fk_device_profile_ota_package
FOREIGN KEY (device_profile_id) REFERENCES device_profile (id)
ON DELETE CASCADE;
END IF;
END;
$$;

140
application/src/main/data/upgrade/3.3.4/schema_update.sql

@ -1,140 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- external_id columns: stable identifiers used when entities are exported /
-- imported between instances (version control), added to each supported type.
ALTER TABLE device
ADD COLUMN IF NOT EXISTS external_id UUID;
ALTER TABLE device_profile
ADD COLUMN IF NOT EXISTS external_id UUID;
ALTER TABLE asset
ADD COLUMN IF NOT EXISTS external_id UUID;
ALTER TABLE rule_chain
ADD COLUMN IF NOT EXISTS external_id UUID;
ALTER TABLE rule_node
ADD COLUMN IF NOT EXISTS external_id UUID;
ALTER TABLE dashboard
ADD COLUMN IF NOT EXISTS external_id UUID;
ALTER TABLE customer
ADD COLUMN IF NOT EXISTS external_id UUID;
ALTER TABLE widgets_bundle
ADD COLUMN IF NOT EXISTS external_id UUID;
ALTER TABLE entity_view
ADD COLUMN IF NOT EXISTS external_id UUID;
-- Rule nodes are looked up by external_id within a chain, and by type.
CREATE INDEX IF NOT EXISTS idx_rule_node_external_id ON rule_node(rule_chain_id, external_id);
CREATE INDEX IF NOT EXISTS idx_rule_node_type ON rule_node(type);
-- Admin settings become tenant-scoped; the default is presumably the
-- platform's reserved system/"null" tenant uuid — TODO confirm.
ALTER TABLE admin_settings
ADD COLUMN IF NOT EXISTS tenant_id uuid NOT NULL DEFAULT '13814000-1dd2-11b2-8080-808080808080';
-- Per-tenant message-queue configuration.
CREATE TABLE IF NOT EXISTS queue (
id uuid NOT NULL CONSTRAINT queue_pkey PRIMARY KEY,
created_time bigint NOT NULL,
tenant_id uuid,
name varchar(255),
topic varchar(255),
poll_interval int,
partitions int,
consumer_per_partition boolean,
pack_processing_timeout bigint,
submit_strategy varchar(255),
processing_strategy varchar(255),
additional_info varchar
);
-- Per-user authentication settings (e.g. serialized two-factor-auth config).
CREATE TABLE IF NOT EXISTS user_auth_settings (
id uuid NOT NULL CONSTRAINT user_auth_settings_pkey PRIMARY KEY,
created_time bigint NOT NULL,
user_id uuid UNIQUE NOT NULL CONSTRAINT fk_user_auth_settings_user_id REFERENCES tb_user(id),
two_fa_settings varchar
);
CREATE INDEX IF NOT EXISTS idx_api_usage_state_entity_id ON api_usage_state(entity_id);
-- isolated_tb_core tenant-profile option is removed in this release.
ALTER TABLE tenant_profile DROP COLUMN IF EXISTS isolated_tb_core;
-- Enforce per-tenant uniqueness of external_id on every table that gained the
-- column above. Consolidated from eight identical DO blocks into one
-- idempotent loop; each table gets a constraint named
-- '<table>_external_id_unq_key', exactly as before, and existing constraints
-- are left untouched.
DO
$$
    DECLARE
        tbl text;
    BEGIN
        FOREACH tbl IN ARRAY ARRAY ['device', 'device_profile', 'asset', 'rule_chain',
            'dashboard', 'customer', 'widgets_bundle', 'entity_view']
            LOOP
                IF NOT EXISTS(SELECT 1 FROM pg_constraint WHERE conname = tbl || '_external_id_unq_key') THEN
                    -- %I quotes identifiers; constraint name mirrors the table name.
                    EXECUTE format('ALTER TABLE %I ADD CONSTRAINT %I UNIQUE (tenant_id, external_id)',
                                   tbl, tbl || '_external_id_unq_key');
                END IF;
            END LOOP;
    END;
$$;

234
application/src/main/data/upgrade/3.4.0/schema_update.sql

@ -1,234 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Typed event tables replacing the single generic "event" table.
-- Each table is range-partitioned by the event timestamp (ts, epoch millis);
-- partitions are created on demand by the migration procedures below / the application.
-- Debug events produced by individual rule nodes (message in/out, errors).
CREATE TABLE IF NOT EXISTS rule_node_debug_event (
id uuid NOT NULL,
tenant_id uuid NOT NULL ,
ts bigint NOT NULL,
entity_id uuid NOT NULL,
service_id varchar,
e_type varchar,
e_entity_id uuid,
e_entity_type varchar,
e_msg_id uuid,
e_msg_type varchar,
e_data_type varchar,
e_relation_type varchar,
e_data varchar,
e_metadata varchar,
e_error varchar
) PARTITION BY RANGE (ts);
-- Debug events emitted at rule-chain level.
CREATE TABLE IF NOT EXISTS rule_chain_debug_event (
id uuid NOT NULL,
tenant_id uuid NOT NULL,
ts bigint NOT NULL,
entity_id uuid NOT NULL,
service_id varchar NOT NULL,
e_message varchar,
e_error varchar
) PARTITION BY RANGE (ts);
-- Periodic per-entity processing statistics (message/error counters).
CREATE TABLE IF NOT EXISTS stats_event (
id uuid NOT NULL,
tenant_id uuid NOT NULL,
ts bigint NOT NULL,
entity_id uuid NOT NULL,
service_id varchar NOT NULL,
e_messages_processed bigint NOT NULL,
e_errors_occurred bigint NOT NULL
) PARTITION BY RANGE (ts);
-- Entity lifecycle events (created/updated/deleted etc.) with success flag.
CREATE TABLE IF NOT EXISTS lc_event (
id uuid NOT NULL,
tenant_id uuid NOT NULL,
ts bigint NOT NULL,
entity_id uuid NOT NULL,
service_id varchar NOT NULL,
e_type varchar NOT NULL,
e_success boolean NOT NULL,
e_error varchar
) PARTITION BY RANGE (ts);
-- Error events with the method that failed and the error details.
CREATE TABLE IF NOT EXISTS error_event (
id uuid NOT NULL,
tenant_id uuid NOT NULL,
ts bigint NOT NULL,
entity_id uuid NOT NULL,
service_id varchar NOT NULL,
e_method varchar NOT NULL,
e_error varchar
) PARTITION BY RANGE (ts);
-- Covering read-path indexes: (tenant, entity, newest-first). FILLFACTOR=95
-- leaves some free space per page for HOT updates on append-heavy data.
CREATE INDEX IF NOT EXISTS idx_rule_node_debug_event_main
ON rule_node_debug_event (tenant_id ASC, entity_id ASC, ts DESC NULLS LAST) WITH (FILLFACTOR=95);
CREATE INDEX IF NOT EXISTS idx_rule_chain_debug_event_main
ON rule_chain_debug_event (tenant_id ASC, entity_id ASC, ts DESC NULLS LAST) WITH (FILLFACTOR=95);
CREATE INDEX IF NOT EXISTS idx_stats_event_main
ON stats_event (tenant_id ASC, entity_id ASC, ts DESC NULLS LAST) WITH (FILLFACTOR=95);
CREATE INDEX IF NOT EXISTS idx_lc_event_main
ON lc_event (tenant_id ASC, entity_id ASC, ts DESC NULLS LAST) WITH (FILLFACTOR=95);
CREATE INDEX IF NOT EXISTS idx_error_event_main
ON error_event (tenant_id ASC, entity_id ASC, ts DESC NULLS LAST) WITH (FILLFACTOR=95);
-- Best-effort text-to-json cast for legacy event bodies.
-- Strips the '\u0000' escape (NUL is not representable in Postgres jsonb/json text)
-- before casting; any cast failure yields an empty object instead of aborting
-- the whole migration batch. The broad exception handler is intentional.
CREATE OR REPLACE FUNCTION to_safe_json(p_json text) RETURNS json
LANGUAGE plpgsql AS
$$
BEGIN
return REPLACE(p_json, '\u0000', '' )::json;
EXCEPTION
WHEN OTHERS THEN
return '{}'::json;
END;
$$;
-- Useful to migrate old events to the new table structure;
-- Copies STATS / LC_EVENT / ERROR rows from the legacy "event" table into
-- stats_event / lc_event / error_event for the half-open window
-- [start_ts_in_ms, end_ts_in_ms). First pre-creates every range partition the
-- window touches, then bulk-inserts; ON CONFLICT DO NOTHING makes re-runs safe.
-- Rows whose body cannot be parsed as JSON with a 'server' key are skipped.
-- NOTE(review): partition boundaries are derived from created_time while the
-- row filter and the target tables partition on ts — presumably created_time
-- and ts are kept equal for events; confirm before reusing this pattern.
CREATE OR REPLACE PROCEDURE migrate_regular_events(IN start_ts_in_ms bigint, IN end_ts_in_ms bigint, IN partition_size_in_hours int)
LANGUAGE plpgsql AS
$$
DECLARE
partition_size_in_ms bigint;
p record;
table_name varchar;
BEGIN
partition_size_in_ms = partition_size_in_hours * 3600 * 1000;
-- Pass 1: create one partition per (event type, time bucket) present in the window.
FOR p IN SELECT DISTINCT event_type as event_type, (created_time - created_time % partition_size_in_ms) as partition_ts FROM event e WHERE e.event_type in ('STATS', 'LC_EVENT', 'ERROR') and ts >= start_ts_in_ms and ts < end_ts_in_ms
LOOP
IF p.event_type = 'STATS' THEN
table_name := 'stats_event';
ELSEIF p.event_type = 'LC_EVENT' THEN
table_name := 'lc_event';
ELSEIF p.event_type = 'ERROR' THEN
table_name := 'error_event';
END IF;
RAISE NOTICE '[%] Partition to create : [%-%]', table_name, p.partition_ts, (p.partition_ts + partition_size_in_ms);
EXECUTE format('CREATE TABLE IF NOT EXISTS %s_%s PARTITION OF %s FOR VALUES FROM ( %s ) TO ( %s )', table_name, p.partition_ts, table_name, p.partition_ts, (p.partition_ts + partition_size_in_ms));
END LOOP;
-- Pass 2: extract typed columns from the legacy JSON body and insert.
INSERT INTO stats_event
SELECT id,
tenant_id,
ts,
entity_id,
body ->> 'server',
(body ->> 'messagesProcessed')::bigint,
(body ->> 'errorsOccurred')::bigint
FROM
(select id, tenant_id, ts, entity_id, to_safe_json(body) as body
FROM event WHERE ts >= start_ts_in_ms and ts < end_ts_in_ms AND event_type = 'STATS' AND to_safe_json(body) ->> 'server' IS NOT NULL
) safe_event
ON CONFLICT DO NOTHING;
INSERT INTO lc_event
SELECT id,
tenant_id,
ts,
entity_id,
body ->> 'server',
body ->> 'event',
(body ->> 'success')::boolean,
body ->> 'error'
FROM
(select id, tenant_id, ts, entity_id, to_safe_json(body) as body
FROM event WHERE ts >= start_ts_in_ms and ts < end_ts_in_ms AND event_type = 'LC_EVENT' AND to_safe_json(body) ->> 'server' IS NOT NULL
) safe_event
ON CONFLICT DO NOTHING;
INSERT INTO error_event
SELECT id,
tenant_id,
ts,
entity_id,
body ->> 'server',
body ->> 'method',
body ->> 'error'
FROM
(select id, tenant_id, ts, entity_id, to_safe_json(body) as body
FROM event WHERE ts >= start_ts_in_ms and ts < end_ts_in_ms AND event_type = 'ERROR' AND to_safe_json(body) ->> 'server' IS NOT NULL
) safe_event
ON CONFLICT DO NOTHING;
END
$$;
-- Useful to migrate old debug events to the new table structure;
-- Same two-pass scheme as migrate_regular_events, but for DEBUG_RULE_NODE /
-- DEBUG_RULE_CHAIN events: pre-create partitions for the window, then
-- bulk-insert with ON CONFLICT DO NOTHING so the procedure can be re-run.
CREATE OR REPLACE PROCEDURE migrate_debug_events(IN start_ts_in_ms bigint, IN end_ts_in_ms bigint, IN partition_size_in_hours int)
LANGUAGE plpgsql AS
$$
DECLARE
partition_size_in_ms bigint;
p record;
table_name varchar;
BEGIN
partition_size_in_ms = partition_size_in_hours * 3600 * 1000;
-- Pass 1: ensure every touched partition exists.
FOR p IN SELECT DISTINCT event_type as event_type, (created_time - created_time % partition_size_in_ms) as partition_ts FROM event e WHERE e.event_type in ('DEBUG_RULE_NODE', 'DEBUG_RULE_CHAIN') and ts >= start_ts_in_ms and ts < end_ts_in_ms
LOOP
IF p.event_type = 'DEBUG_RULE_NODE' THEN
table_name := 'rule_node_debug_event';
ELSEIF p.event_type = 'DEBUG_RULE_CHAIN' THEN
table_name := 'rule_chain_debug_event';
END IF;
RAISE NOTICE '[%] Partition to create : [%-%]', table_name, p.partition_ts, (p.partition_ts + partition_size_in_ms);
EXECUTE format('CREATE TABLE IF NOT EXISTS %s_%s PARTITION OF %s FOR VALUES FROM ( %s ) TO ( %s )', table_name, p.partition_ts, table_name, p.partition_ts, (p.partition_ts + partition_size_in_ms));
END LOOP;
-- Pass 2: project the JSON body into the typed columns.
INSERT INTO rule_node_debug_event
SELECT id,
tenant_id,
ts,
entity_id,
body ->> 'server',
body ->> 'type',
(body ->> 'entityId')::uuid,
body ->> 'entityName',
(body ->> 'msgId')::uuid,
body ->> 'msgType',
body ->> 'dataType',
body ->> 'relationType',
body ->> 'data',
body ->> 'metadata',
body ->> 'error'
FROM
(select id, tenant_id, ts, entity_id, to_safe_json(body) as body
FROM event WHERE ts >= start_ts_in_ms and ts < end_ts_in_ms AND event_type = 'DEBUG_RULE_NODE' AND to_safe_json(body) ->> 'server' IS NOT NULL
) safe_event
ON CONFLICT DO NOTHING;
INSERT INTO rule_chain_debug_event
SELECT id,
tenant_id,
ts,
entity_id,
body ->> 'server',
body ->> 'message',
body ->> 'error'
FROM
(select id, tenant_id, ts, entity_id, to_safe_json(body) as body
FROM event WHERE ts >= start_ts_in_ms and ts < end_ts_in_ms AND event_type = 'DEBUG_RULE_CHAIN' AND to_safe_json(body) ->> 'server' IS NOT NULL
) safe_event
ON CONFLICT DO NOTHING;
END
$$;
-- Repair an invalid Japanese locale code stored in user settings:
-- 'ja_JA' is not a valid language_COUNTRY pair; the correct one is 'ja_JP'.
UPDATE tb_user
SET additional_info = REPLACE(additional_info, '"lang":"ja_JA"', '"lang":"ja_JP"')
WHERE additional_info LIKE '%"lang":"ja_JA"%';

142
application/src/main/data/upgrade/3.4.1/schema_update.sql

@ -1,142 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- AUDIT LOGS MIGRATION START
-- Rename the legacy audit_log table (and all of its time partitions) to
-- old_audit_log_* so a freshly partitioned audit_log can be created below.
-- Guarded so that re-running the upgrade script is a no-op.
DO
$$
DECLARE table_partition RECORD;
BEGIN
-- in case of running the upgrade script a second time:
IF NOT (SELECT exists(SELECT FROM pg_tables WHERE tablename = 'old_audit_log')) THEN
ALTER TABLE audit_log RENAME TO old_audit_log;
CREATE INDEX IF NOT EXISTS idx_old_audit_log_created_time ON old_audit_log(created_time);
ALTER INDEX IF EXISTS idx_audit_log_tenant_id_and_created_time RENAME TO idx_old_audit_log_tenant_id_and_created_time;
-- Partition names look like audit_log_<ts>; carry the timestamp suffix over.
FOR table_partition IN SELECT tablename AS name, split_part(tablename, '_', 3) AS partition_ts
FROM pg_tables WHERE tablename LIKE 'audit_log_%'
LOOP
EXECUTE format('ALTER TABLE %s RENAME TO old_audit_log_%s', table_partition.name, table_partition.partition_ts);
END LOOP;
ELSE
RAISE NOTICE 'Table old_audit_log already exists, leaving as is';
END IF;
END;
$$;
-- New audit_log table, range-partitioned by created_time (epoch millis).
-- Note: partitioned tables cannot have a global PK, hence the separate id index.
CREATE TABLE IF NOT EXISTS audit_log (
id uuid NOT NULL,
created_time bigint NOT NULL,
tenant_id uuid,
customer_id uuid,
entity_id uuid,
entity_type varchar(255),
entity_name varchar(255),
user_id uuid,
user_name varchar(255),
action_type varchar(255),
action_data varchar(1000000),
action_status varchar(255),
action_failure_details varchar(1000000)
) PARTITION BY RANGE (created_time);
CREATE INDEX IF NOT EXISTS idx_audit_log_tenant_id_and_created_time ON audit_log(tenant_id, created_time DESC);
CREATE INDEX IF NOT EXISTS idx_audit_log_id ON audit_log(id);
-- Copies audit logs from old_audit_log into the partitioned audit_log for the
-- half-open window [start_time_ms, end_time_ms), creating each partition of
-- size partition_size_ms first. Intended to be called in batches by the upgrade.
CREATE OR REPLACE PROCEDURE migrate_audit_logs(IN start_time_ms BIGINT, IN end_time_ms BIGINT, IN partition_size_ms BIGINT)
LANGUAGE plpgsql AS
$$
DECLARE
p RECORD;
partition_end_ts BIGINT;
BEGIN
-- Create one partition per distinct time bucket found in the window.
FOR p IN SELECT DISTINCT (created_time - created_time % partition_size_ms) AS partition_ts FROM old_audit_log
WHERE created_time >= start_time_ms AND created_time < end_time_ms
LOOP
partition_end_ts = p.partition_ts + partition_size_ms;
RAISE NOTICE '[audit_log] Partition to create : [%-%]', p.partition_ts, partition_end_ts;
EXECUTE format('CREATE TABLE IF NOT EXISTS audit_log_%s PARTITION OF audit_log ' ||
'FOR VALUES FROM ( %s ) TO ( %s )', p.partition_ts, p.partition_ts, partition_end_ts);
END LOOP;
-- Straight column-for-column copy of the window.
INSERT INTO audit_log
SELECT id, created_time, tenant_id, customer_id, entity_id, entity_type, entity_name, user_id, user_name, action_type, action_data, action_status, action_failure_details
FROM old_audit_log
WHERE created_time >= start_time_ms AND created_time < end_time_ms;
END;
$$;
-- AUDIT LOGS MIGRATION END
-- EDGE EVENTS MIGRATION START
-- Same rename dance as for audit_log: move edge_event (and its partitions)
-- aside as old_edge_event_* so the new partitioned table can be created.
DO
$$
DECLARE table_partition RECORD;
BEGIN
-- in case of running the upgrade script a second time:
IF NOT (SELECT exists(SELECT FROM pg_tables WHERE tablename = 'old_edge_event')) THEN
ALTER TABLE edge_event RENAME TO old_edge_event;
CREATE INDEX IF NOT EXISTS idx_old_edge_event_created_time_tmp ON old_edge_event(created_time);
ALTER INDEX IF EXISTS idx_edge_event_tenant_id_and_created_time RENAME TO idx_old_edge_event_tenant_id_and_created_time;
FOR table_partition IN SELECT tablename AS name, split_part(tablename, '_', 3) AS partition_ts
FROM pg_tables WHERE tablename LIKE 'edge_event_%'
LOOP
EXECUTE format('ALTER TABLE %s RENAME TO old_edge_event_%s', table_partition.name, table_partition.partition_ts);
END LOOP;
ELSE
RAISE NOTICE 'Table old_edge_event already exists, leaving as is';
END IF;
END;
$$;
-- New edge_event table, range-partitioned by created_time (epoch millis).
CREATE TABLE IF NOT EXISTS edge_event (
id uuid NOT NULL,
created_time bigint NOT NULL,
edge_id uuid,
edge_event_type varchar(255),
edge_event_uid varchar(255),
entity_id uuid,
edge_event_action varchar(255),
body varchar(10000000),
tenant_id uuid,
ts bigint NOT NULL
) PARTITION BY RANGE (created_time);
CREATE INDEX IF NOT EXISTS idx_edge_event_tenant_id_and_created_time ON edge_event(tenant_id, created_time DESC);
CREATE INDEX IF NOT EXISTS idx_edge_event_id ON edge_event(id);
-- Copies edge events from old_edge_event into the partitioned edge_event for
-- the half-open window [start_time_ms, end_time_ms), creating each
-- partition_size_ms-sized partition first. Mirrors migrate_audit_logs.
CREATE OR REPLACE PROCEDURE migrate_edge_event(IN start_time_ms BIGINT, IN end_time_ms BIGINT, IN partition_size_ms BIGINT)
LANGUAGE plpgsql AS
$$
DECLARE
p RECORD;
partition_end_ts BIGINT;
BEGIN
FOR p IN SELECT DISTINCT (created_time - created_time % partition_size_ms) AS partition_ts FROM old_edge_event
WHERE created_time >= start_time_ms AND created_time < end_time_ms
LOOP
partition_end_ts = p.partition_ts + partition_size_ms;
RAISE NOTICE '[edge_event] Partition to create : [%-%]', p.partition_ts, partition_end_ts;
EXECUTE format('CREATE TABLE IF NOT EXISTS edge_event_%s PARTITION OF edge_event ' ||
'FOR VALUES FROM ( %s ) TO ( %s )', p.partition_ts, p.partition_ts, partition_end_ts);
END LOOP;
INSERT INTO edge_event
SELECT id, created_time, edge_id, edge_event_type, edge_event_uid, entity_id, edge_event_action, body, tenant_id, ts
FROM old_edge_event
WHERE created_time >= start_time_ms AND created_time < end_time_ms;
END;
$$;
-- EDGE EVENTS MIGRATION END

21
application/src/main/data/upgrade/3.4.1/schema_update_after.sql

@ -1,21 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Post-migration cleanup: the backfill helper is no longer needed, and every
-- asset now has a profile, so the column can be made mandatory and the FK
-- recreated against asset_profile.
DROP PROCEDURE IF EXISTS update_asset_profiles;
ALTER TABLE asset ALTER COLUMN asset_profile_id SET NOT NULL;
ALTER TABLE asset DROP CONSTRAINT IF EXISTS fk_asset_profile;
ALTER TABLE asset ADD CONSTRAINT fk_asset_profile FOREIGN KEY (asset_profile_id) REFERENCES asset_profile(id);

46
application/src/main/data/upgrade/3.4.1/schema_update_before.sql

@ -1,46 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Asset profile entity (introduced in 3.4.1); names and external ids are
-- unique per tenant, with optional default rule chain / dashboard references.
CREATE TABLE IF NOT EXISTS asset_profile (
id uuid NOT NULL CONSTRAINT asset_profile_pkey PRIMARY KEY,
created_time bigint NOT NULL,
name varchar(255),
image varchar(1000000),
description varchar,
search_text varchar(255),
is_default boolean,
tenant_id uuid,
default_rule_chain_id uuid,
default_dashboard_id uuid,
default_queue_name varchar(255),
external_id uuid,
CONSTRAINT asset_profile_name_unq_key UNIQUE (tenant_id, name),
CONSTRAINT asset_profile_external_id_unq_key UNIQUE (tenant_id, external_id),
CONSTRAINT fk_default_rule_chain_asset_profile FOREIGN KEY (default_rule_chain_id) REFERENCES rule_chain(id),
CONSTRAINT fk_default_dashboard_asset_profile FOREIGN KEY (default_dashboard_id) REFERENCES dashboard(id)
);
-- Backfills asset.asset_profile_id for pre-existing assets: match the asset's
-- legacy "type" string to a same-tenant profile name, falling back to the
-- tenant's 'default' profile. Only touches rows that have no profile yet.
CREATE OR REPLACE PROCEDURE update_asset_profiles()
LANGUAGE plpgsql AS
$$
BEGIN
UPDATE asset a SET asset_profile_id = COALESCE(
(SELECT id from asset_profile p WHERE p.tenant_id = a.tenant_id AND a.type = p.name),
(SELECT id from asset_profile p WHERE p.tenant_id = a.tenant_id AND p.name = 'default')
)
WHERE a.asset_profile_id IS NULL;
END;
$$;

379
application/src/main/data/upgrade/3.4.4/schema_update.sql

@ -1,379 +0,0 @@
--
-- Copyright © 2016-2024 The Thingsboard Authors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- USER CREDENTIALS START
-- Move the password-history blob out of tb_user.additional_info into a new
-- user_credentials.additional_info JSON column, then remove it from tb_user.
ALTER TABLE user_credentials
ADD COLUMN IF NOT EXISTS additional_info varchar NOT NULL DEFAULT '{}';
UPDATE user_credentials
SET additional_info = json_build_object('userPasswordHistory', (u.additional_info::json -> 'userPasswordHistory'))
FROM tb_user u WHERE user_credentials.user_id = u.id AND u.additional_info::jsonb ? 'userPasswordHistory';
UPDATE tb_user SET additional_info = tb_user.additional_info::jsonb - 'userPasswordHistory' WHERE additional_info::jsonb ? 'userPasswordHistory';
-- USER CREDENTIALS END
-- ALARM ASSIGN TO USER START
-- New assignee support on alarms plus an index for "alarms assigned to user" queries.
ALTER TABLE alarm ADD COLUMN IF NOT EXISTS assign_ts BIGINT DEFAULT 0;
ALTER TABLE alarm ADD COLUMN IF NOT EXISTS assignee_id UUID;
CREATE INDEX IF NOT EXISTS idx_alarm_tenant_assignee_created_time ON alarm(tenant_id, assignee_id, created_time DESC);
-- ALARM ASSIGN TO USER END
-- ALARM STATUS REFACTORING START
-- Replace the single 4-state "status" varchar with two independent booleans.
ALTER TABLE alarm ADD COLUMN IF NOT EXISTS acknowledged boolean;
ALTER TABLE alarm ADD COLUMN IF NOT EXISTS cleared boolean;
ALTER TABLE alarm ADD COLUMN IF NOT EXISTS status varchar; -- to avoid failure of the subsequent upgrade.
UPDATE alarm SET acknowledged = true, cleared = true WHERE status = 'CLEARED_ACK';
UPDATE alarm SET acknowledged = true, cleared = false WHERE status = 'ACTIVE_ACK';
UPDATE alarm SET acknowledged = false, cleared = true WHERE status = 'CLEARED_UNACK';
UPDATE alarm SET acknowledged = false, cleared = false WHERE status = 'ACTIVE_UNACK';
-- Drop index by 'status' column and replace with new indexes that has only active alarms;
DROP INDEX IF EXISTS idx_alarm_originator_alarm_type_active;
CREATE INDEX IF NOT EXISTS idx_alarm_originator_alarm_type_active
ON alarm USING btree (originator_id, type) WHERE cleared = false;
DROP INDEX IF EXISTS idx_alarm_tenant_alarm_type_active;
CREATE INDEX IF NOT EXISTS idx_alarm_tenant_alarm_type_active
ON alarm USING btree (tenant_id, type) WHERE cleared = false;
-- Cover index by alarm type to optimize propagated alarm queries;
DROP INDEX IF EXISTS idx_entity_alarm_entity_id_alarm_type_created_time_alarm_id;
CREATE INDEX IF NOT EXISTS idx_entity_alarm_entity_id_alarm_type_created_time_alarm_id ON entity_alarm
USING btree (tenant_id, entity_id, alarm_type, created_time DESC) INCLUDE(alarm_id);
DROP INDEX IF EXISTS idx_alarm_tenant_status_created_time;
ALTER TABLE alarm DROP COLUMN IF EXISTS status;
-- Update old alarms and set their state to clear, if there are newer alarms.
-- Only the most recent alarm per (tenant, originator, type) may stay active.
UPDATE alarm a
SET cleared = TRUE
WHERE cleared = FALSE
AND id != (SELECT l.id
FROM alarm l
WHERE l.tenant_id = a.tenant_id
AND l.originator_id = a.originator_id
AND l.type = a.type
ORDER BY l.created_time DESC, l.id
LIMIT 1);
-- ALARM STATUS REFACTORING END
-- ALARM COMMENTS START
-- User comments attached to alarms; partitioned by created_time and removed
-- automatically with the alarm via ON DELETE CASCADE.
CREATE TABLE IF NOT EXISTS alarm_comment (
id uuid NOT NULL,
created_time bigint NOT NULL,
alarm_id uuid NOT NULL,
user_id uuid,
type varchar(255) NOT NULL,
comment varchar(10000),
CONSTRAINT fk_alarm_comment_alarm_id FOREIGN KEY (alarm_id) REFERENCES alarm(id) ON DELETE CASCADE
) PARTITION BY RANGE (created_time);
CREATE INDEX IF NOT EXISTS idx_alarm_comment_alarm_id ON alarm_comment(alarm_id);
-- ALARM COMMENTS END
-- NOTIFICATIONS START
-- Notification subsystem schema: targets (who to notify), templates (what to
-- send), rules (when to trigger), requests (a concrete send), and the
-- per-recipient notification rows themselves.
-- A target describes a set of recipients; name is unique per tenant.
CREATE TABLE IF NOT EXISTS notification_target (
id UUID NOT NULL CONSTRAINT notification_target_pkey PRIMARY KEY,
created_time BIGINT NOT NULL,
tenant_id UUID NOT NULL,
name VARCHAR(255) NOT NULL,
configuration VARCHAR(10000) NOT NULL,
CONSTRAINT uq_notification_target_name UNIQUE (tenant_id, name)
);
CREATE INDEX IF NOT EXISTS idx_notification_target_tenant_id_created_time ON notification_target(tenant_id, created_time DESC);
-- Reusable message template per notification type.
CREATE TABLE IF NOT EXISTS notification_template (
id UUID NOT NULL CONSTRAINT notification_template_pkey PRIMARY KEY,
created_time BIGINT NOT NULL,
tenant_id UUID NOT NULL,
name VARCHAR(255) NOT NULL,
notification_type VARCHAR(50) NOT NULL,
configuration VARCHAR(10000000) NOT NULL,
CONSTRAINT uq_notification_template_name UNIQUE (tenant_id, name)
);
CREATE INDEX IF NOT EXISTS idx_notification_template_tenant_id_created_time ON notification_template(tenant_id, created_time DESC);
-- Rule binding a trigger type to a template and recipient configuration.
CREATE TABLE IF NOT EXISTS notification_rule (
id UUID NOT NULL CONSTRAINT notification_rule_pkey PRIMARY KEY,
created_time BIGINT NOT NULL,
tenant_id UUID NOT NULL,
name VARCHAR(255) NOT NULL,
template_id UUID NOT NULL CONSTRAINT fk_notification_rule_template_id REFERENCES notification_template(id),
trigger_type VARCHAR(50) NOT NULL,
trigger_config VARCHAR(1000) NOT NULL,
recipients_config VARCHAR(10000) NOT NULL,
additional_config VARCHAR(255),
CONSTRAINT uq_notification_rule_name UNIQUE (tenant_id, name)
);
CREATE INDEX IF NOT EXISTS idx_notification_rule_tenant_id_trigger_type_created_time ON notification_rule(tenant_id, trigger_type, created_time DESC);
-- One row per dispatch attempt; template may be inline or referenced.
CREATE TABLE IF NOT EXISTS notification_request (
id UUID NOT NULL CONSTRAINT notification_request_pkey PRIMARY KEY,
created_time BIGINT NOT NULL,
tenant_id UUID NOT NULL,
targets VARCHAR(10000) NOT NULL,
template_id UUID,
template VARCHAR(10000000),
info VARCHAR(1000),
additional_config VARCHAR(1000),
originator_entity_id UUID,
originator_entity_type VARCHAR(32),
rule_id UUID NULL,
status VARCHAR(32),
stats VARCHAR(10000)
);
-- Partial indexes match the three dominant query shapes: user-originated
-- listing, alarm-rule deduplication, and the scheduled-dispatch scan.
CREATE INDEX IF NOT EXISTS idx_notification_request_tenant_id_user_created_time ON notification_request(tenant_id, created_time DESC)
WHERE originator_entity_type = 'USER';
CREATE INDEX IF NOT EXISTS idx_notification_request_rule_id_originator_entity_id ON notification_request(rule_id, originator_entity_id)
WHERE originator_entity_type = 'ALARM';
CREATE INDEX IF NOT EXISTS idx_notification_request_status ON notification_request(status)
WHERE status = 'SCHEDULED';
-- Per-recipient notification, partitioned by created_time; cascades away with
-- its request or its recipient user.
CREATE TABLE IF NOT EXISTS notification (
id UUID NOT NULL,
created_time BIGINT NOT NULL,
request_id UUID NULL CONSTRAINT fk_notification_request_id REFERENCES notification_request(id) ON DELETE CASCADE,
recipient_id UUID NOT NULL CONSTRAINT fk_notification_recipient_id REFERENCES tb_user(id) ON DELETE CASCADE,
type VARCHAR(50) NOT NULL,
subject VARCHAR(255),
body VARCHAR(1000) NOT NULL,
additional_config VARCHAR(1000),
status VARCHAR(32)
) PARTITION BY RANGE (created_time);
CREATE INDEX IF NOT EXISTS idx_notification_id ON notification(id);
CREATE INDEX IF NOT EXISTS idx_notification_recipient_id_created_time ON notification(recipient_id, created_time DESC);
-- NOTIFICATIONS END
ALTER TABLE tb_user ADD COLUMN IF NOT EXISTS phone VARCHAR(255);
-- Per-user settings keyed by (user_id, type); removed with the user.
CREATE TABLE IF NOT EXISTS user_settings (
user_id uuid NOT NULL,
type VARCHAR(50) NOT NULL,
settings varchar(10000),
CONSTRAINT fk_user_id FOREIGN KEY (user_id) REFERENCES tb_user(id) ON DELETE CASCADE,
CONSTRAINT user_settings_pkey PRIMARY KEY (user_id, type)
);
-- TTL DROP PARTITIONS FUNCTIONS UPDATE START
-- Replace the old "max ttl" helpers with the system-ttl variants below.
DROP PROCEDURE IF EXISTS drop_partitions_by_max_ttl(character varying, bigint, bigint);
DROP FUNCTION IF EXISTS get_partition_by_max_ttl_date;
-- Maps a cutoff date to the name of the ts_kv partition that would contain it,
-- following the ts_kv_<yyyy>[_<MM>[_<dd>]] naming scheme selected by
-- partition_type ('DAYS' | 'MONTHS' | 'YEARS').
-- Returns NULL (with a NOTICE) when partition_type is unknown or when no table
-- with the computed name exists in the public schema.
CREATE OR REPLACE FUNCTION get_partition_by_system_ttl_date(IN partition_type varchar, IN date timestamp, OUT partition varchar) AS
$$
BEGIN
    CASE
        WHEN partition_type = 'DAYS' THEN
            partition := 'ts_kv_' || to_char(date, 'yyyy') || '_' || to_char(date, 'MM') || '_' || to_char(date, 'dd');
        WHEN partition_type = 'MONTHS' THEN
            partition := 'ts_kv_' || to_char(date, 'yyyy') || '_' || to_char(date, 'MM');
        WHEN partition_type = 'YEARS' THEN
            partition := 'ts_kv_' || to_char(date, 'yyyy');
        ELSE
            partition := NULL;
    END CASE;
    IF partition IS NOT NULL THEN
        -- Verify the computed partition actually exists; otherwise report NULL.
        IF NOT EXISTS(SELECT
                      FROM pg_tables
                      WHERE schemaname = 'public'
                        AND tablename = partition) THEN
            partition := NULL;
            -- Fixed message wording (was: 'Failed to found partition by ttl').
            RAISE NOTICE 'Failed to find partition by ttl';
        END IF;
    END IF;
END;
$$ LANGUAGE plpgsql;
-- Drops expired ts_kv_* partitions older than the system TTL (seconds).
-- The cutoff partition name is resolved via get_partition_by_system_ttl_date;
-- everything strictly older than the cutoff (by year, then month, then day,
-- depending on partition_type) is dropped. ts_kv_latest, ts_kv_dictionary and
-- ts_kv_indefinite are always excluded. "deleted" accumulates the drop count
-- across calls (INOUT). Nothing happens when system_ttl is NULL or <= 0, or
-- when the cutoff partition cannot be resolved.
CREATE OR REPLACE PROCEDURE drop_partitions_by_system_ttl(IN partition_type varchar, IN system_ttl bigint, INOUT deleted bigint)
LANGUAGE plpgsql AS
$$
DECLARE
date timestamp;
partition_by_max_ttl_date varchar;
partition_by_max_ttl_month varchar;
partition_by_max_ttl_day varchar;
partition_by_max_ttl_year varchar;
partition varchar;
partition_year integer;
partition_month integer;
partition_day integer;
BEGIN
if system_ttl IS NOT NULL AND system_ttl > 0 THEN
-- Cutoff timestamp = now - ttl; then resolve the partition holding it.
date := to_timestamp(EXTRACT(EPOCH FROM current_timestamp) - system_ttl);
partition_by_max_ttl_date := get_partition_by_system_ttl_date(partition_type, date);
RAISE NOTICE 'Date by max ttl: %', date;
RAISE NOTICE 'Partition by max ttl: %', partition_by_max_ttl_date;
IF partition_by_max_ttl_date IS NOT NULL THEN
-- Split 'ts_kv_<yyyy>[_<MM>[_<dd>]]' into its date components; parts 3/4/5
-- are year/month/day respectively.
CASE
WHEN partition_type = 'DAYS' THEN
partition_by_max_ttl_year := SPLIT_PART(partition_by_max_ttl_date, '_', 3);
partition_by_max_ttl_month := SPLIT_PART(partition_by_max_ttl_date, '_', 4);
partition_by_max_ttl_day := SPLIT_PART(partition_by_max_ttl_date, '_', 5);
WHEN partition_type = 'MONTHS' THEN
partition_by_max_ttl_year := SPLIT_PART(partition_by_max_ttl_date, '_', 3);
partition_by_max_ttl_month := SPLIT_PART(partition_by_max_ttl_date, '_', 4);
ELSE
partition_by_max_ttl_year := SPLIT_PART(partition_by_max_ttl_date, '_', 3);
END CASE;
IF partition_by_max_ttl_year IS NULL THEN
RAISE NOTICE 'Failed to remove partitions by max ttl date due to partition_by_max_ttl_year is null!';
ELSE
-- YEARS granularity: drop every ts_kv partition whose year < cutoff year.
IF partition_type = 'YEARS' THEN
FOR partition IN SELECT tablename
FROM pg_tables
WHERE schemaname = 'public'
AND tablename like 'ts_kv_' || '%'
AND tablename != 'ts_kv_latest'
AND tablename != 'ts_kv_dictionary'
AND tablename != 'ts_kv_indefinite'
AND tablename != partition_by_max_ttl_date
LOOP
partition_year := SPLIT_PART(partition, '_', 3)::integer;
IF partition_year < partition_by_max_ttl_year::integer THEN
RAISE NOTICE 'Partition to delete by max ttl: %', partition;
EXECUTE format('DROP TABLE IF EXISTS %I', partition);
deleted := deleted + 1;
END IF;
END LOOP;
ELSE
-- MONTHS granularity: drop older years outright; within the cutoff year,
-- drop months strictly before the cutoff month.
IF partition_type = 'MONTHS' THEN
IF partition_by_max_ttl_month IS NULL THEN
RAISE NOTICE 'Failed to remove months partitions by max ttl date due to partition_by_max_ttl_month is null!';
ELSE
FOR partition IN SELECT tablename
FROM pg_tables
WHERE schemaname = 'public'
AND tablename like 'ts_kv_' || '%'
AND tablename != 'ts_kv_latest'
AND tablename != 'ts_kv_dictionary'
AND tablename != 'ts_kv_indefinite'
AND tablename != partition_by_max_ttl_date
LOOP
partition_year := SPLIT_PART(partition, '_', 3)::integer;
IF partition_year > partition_by_max_ttl_year::integer THEN
RAISE NOTICE 'Skip iteration! Partition: % is valid!', partition;
CONTINUE;
ELSE
IF partition_year < partition_by_max_ttl_year::integer THEN
RAISE NOTICE 'Partition to delete by max ttl: %', partition;
EXECUTE format('DROP TABLE IF EXISTS %I', partition);
deleted := deleted + 1;
ELSE
partition_month := SPLIT_PART(partition, '_', 4)::integer;
IF partition_year = partition_by_max_ttl_year::integer THEN
IF partition_month >= partition_by_max_ttl_month::integer THEN
RAISE NOTICE 'Skip iteration! Partition: % is valid!', partition;
CONTINUE;
ELSE
RAISE NOTICE 'Partition to delete by max ttl: %', partition;
EXECUTE format('DROP TABLE IF EXISTS %I', partition);
deleted := deleted + 1;
END IF;
END IF;
END IF;
END IF;
END LOOP;
END IF;
ELSE
-- DAYS granularity: compare year, then month, then day; only partitions
-- strictly older than the cutoff date are dropped.
IF partition_type = 'DAYS' THEN
IF partition_by_max_ttl_month IS NULL THEN
RAISE NOTICE 'Failed to remove days partitions by max ttl date due to partition_by_max_ttl_month is null!';
ELSE
IF partition_by_max_ttl_day IS NULL THEN
RAISE NOTICE 'Failed to remove days partitions by max ttl date due to partition_by_max_ttl_day is null!';
ELSE
FOR partition IN SELECT tablename
FROM pg_tables
WHERE schemaname = 'public'
AND tablename like 'ts_kv_' || '%'
AND tablename != 'ts_kv_latest'
AND tablename != 'ts_kv_dictionary'
AND tablename != 'ts_kv_indefinite'
AND tablename != partition_by_max_ttl_date
LOOP
partition_year := SPLIT_PART(partition, '_', 3)::integer;
IF partition_year > partition_by_max_ttl_year::integer THEN
RAISE NOTICE 'Skip iteration! Partition: % is valid!', partition;
CONTINUE;
ELSE
IF partition_year < partition_by_max_ttl_year::integer THEN
RAISE NOTICE 'Partition to delete by max ttl: %', partition;
EXECUTE format('DROP TABLE IF EXISTS %I', partition);
deleted := deleted + 1;
ELSE
partition_month := SPLIT_PART(partition, '_', 4)::integer;
IF partition_month > partition_by_max_ttl_month::integer THEN
RAISE NOTICE 'Skip iteration! Partition: % is valid!', partition;
CONTINUE;
ELSE
IF partition_month < partition_by_max_ttl_month::integer THEN
RAISE NOTICE 'Partition to delete by max ttl: %', partition;
EXECUTE format('DROP TABLE IF EXISTS %I', partition);
deleted := deleted + 1;
ELSE
partition_day := SPLIT_PART(partition, '_', 5)::integer;
IF partition_day >= partition_by_max_ttl_day::integer THEN
RAISE NOTICE 'Skip iteration! Partition: % is valid!', partition;
CONTINUE;
ELSE
IF partition_day < partition_by_max_ttl_day::integer THEN
RAISE NOTICE 'Partition to delete by max ttl: %', partition;
EXECUTE format('DROP TABLE IF EXISTS %I', partition);
deleted := deleted + 1;
END IF;
END IF;
END IF;
END IF;
END IF;
END IF;
END LOOP;
END IF;
END IF;
END IF;
END IF;
END IF;
END IF;
END IF;
END IF;
END
$$;
-- TTL DROP PARTITIONS FUNCTIONS UPDATE END
-- RULE NODE SINGLETON MODE SUPPORT
-- singleton_mode forces a rule node to run on a single cluster member;
-- enable it for the stateful MQTT-connection nodes, and record each node
-- class's clustering capability in its component descriptor.
ALTER TABLE rule_node ADD COLUMN IF NOT EXISTS singleton_mode bool DEFAULT false;
UPDATE rule_node SET singleton_mode = true WHERE type IN ('org.thingsboard.rule.engine.mqtt.azure.TbAzureIotHubNode', 'org.thingsboard.rule.engine.mqtt.TbMqttNode');
ALTER TABLE component_descriptor ADD COLUMN IF NOT EXISTS clustering_mode varchar(255) DEFAULT 'ENABLED';
UPDATE component_descriptor SET clustering_mode = 'USER_PREFERENCE' WHERE clazz = 'org.thingsboard.rule.engine.mqtt.TbMqttNode';
UPDATE component_descriptor SET clustering_mode = 'SINGLETON' WHERE clazz = 'org.thingsboard.rule.engine.mqtt.azure.TbAzureIotHubNode';

14
application/src/main/java/org/thingsboard/server/actors/device/DeviceActorMessageProcessor.java

@ -66,7 +66,6 @@ import org.thingsboard.server.common.msg.rule.engine.DeviceCredentialsUpdateNoti
import org.thingsboard.server.common.msg.rule.engine.DeviceEdgeUpdateMsg;
import org.thingsboard.server.common.msg.rule.engine.DeviceNameOrTypeUpdateMsg;
import org.thingsboard.server.common.msg.timeout.DeviceActorServerSideRpcTimeoutMsg;
import org.thingsboard.server.gen.transport.TransportProtos;
import org.thingsboard.server.gen.transport.TransportProtos.AttributeUpdateNotificationMsg;
import org.thingsboard.server.gen.transport.TransportProtos.ClaimDeviceMsg;
import org.thingsboard.server.gen.transport.TransportProtos.DeviceSessionsCacheEntry;
@ -90,7 +89,9 @@ import org.thingsboard.server.gen.transport.TransportProtos.ToTransportMsg;
import org.thingsboard.server.gen.transport.TransportProtos.ToTransportUpdateCredentialsProto;
import org.thingsboard.server.gen.transport.TransportProtos.TransportToDeviceActorMsg;
import org.thingsboard.server.gen.transport.TransportProtos.TsKvProto;
import org.thingsboard.server.gen.transport.TransportProtos.UplinkNotificationMsg;
import org.thingsboard.server.service.rpc.RpcSubmitStrategy;
import org.thingsboard.server.service.state.DefaultDeviceStateService;
import org.thingsboard.server.service.transport.msg.TransportToDeviceActorMsgWrapper;
import javax.annotation.Nullable;
@ -173,7 +174,7 @@ public class DeviceActorMessageProcessor extends AbstractContextAwareMsgProcesso
private EdgeId findRelatedEdgeId() {
List<EntityRelation> result =
systemContext.getRelationService().findByToAndType(tenantId, deviceId, EntityRelation.CONTAINS_TYPE, RelationTypeGroup.COMMON);
systemContext.getRelationService().findByToAndType(tenantId, deviceId, EntityRelation.CONTAINS_TYPE, RelationTypeGroup.EDGE);
if (result != null && result.size() > 0) {
EntityRelation relationToEdge = result.get(0);
if (relationToEdge.getFrom() != null && relationToEdge.getFrom().getId() != null) {
@@ -212,8 +213,11 @@ public class DeviceActorMessageProcessor extends AbstractContextAwareMsgProcesso
if (systemContext.isEdgesEnabled() && edgeId != null) {
log.debug("[{}][{}] device is related to edge: [{}]. Saving RPC request: [{}][{}] to edge queue", tenantId, deviceId, edgeId.getId(), rpcId, requestId);
try {
saveRpcRequestToEdgeQueue(request, requestId).get();
sent = true;
if (systemContext.getEdgeService().isEdgeActiveAsync(tenantId, edgeId, DefaultDeviceStateService.ACTIVITY_STATE).get()) {
saveRpcRequestToEdgeQueue(request, requestId).get();
} else {
log.error("[{}][{}][{}] Failed to save RPC request to edge queue {}. The Edge is currently offline or unreachable", tenantId, deviceId, edgeId.getId(), request);
}
} catch (InterruptedException | ExecutionException e) {
log.error("[{}][{}][{}] Failed to save RPC request to edge queue {}", tenantId, deviceId, edgeId.getId(), request, e);
}
@@ -470,7 +474,7 @@ public class DeviceActorMessageProcessor extends AbstractContextAwareMsgProcesso
callback.onSuccess();
}
private void processUplinkNotificationMsg(SessionInfoProto sessionInfo, TransportProtos.UplinkNotificationMsg uplinkNotificationMsg) {
private void processUplinkNotificationMsg(SessionInfoProto sessionInfo, UplinkNotificationMsg uplinkNotificationMsg) {
String nodeId = sessionInfo.getNodeId();
sessions.entrySet().stream()
.filter(kv -> kv.getValue().getSessionInfo().getNodeId().equals(nodeId) && (kv.getValue().isSubscribedToAttributes() || kv.getValue().isSubscribedToRPC()))

2
application/src/main/java/org/thingsboard/server/actors/ruleChain/RuleChainActorMessageProcessor.java

@@ -16,7 +16,6 @@
package org.thingsboard.server.actors.ruleChain;
import lombok.extern.slf4j.Slf4j;
import org.thingsboard.server.common.data.msg.TbNodeConnectionType;
import org.thingsboard.server.actors.ActorSystemContext;
import org.thingsboard.server.actors.TbActorCtx;
import org.thingsboard.server.actors.TbActorRef;
@@ -29,6 +28,7 @@ import org.thingsboard.server.common.data.id.EntityId;
import org.thingsboard.server.common.data.id.RuleChainId;
import org.thingsboard.server.common.data.id.RuleNodeId;
import org.thingsboard.server.common.data.id.TenantId;
import org.thingsboard.server.common.data.msg.TbNodeConnectionType;
import org.thingsboard.server.common.data.plugin.ComponentLifecycleEvent;
import org.thingsboard.server.common.data.plugin.ComponentLifecycleState;
import org.thingsboard.server.common.data.relation.EntityRelation;

2
application/src/main/java/org/thingsboard/server/config/RateLimitProcessingFilter.java

@@ -26,7 +26,7 @@ import org.thingsboard.server.common.data.EntityType;
import org.thingsboard.server.common.data.exception.TenantProfileNotFoundException;
import org.thingsboard.server.common.data.limit.LimitedApi;
import org.thingsboard.server.common.msg.tools.TbRateLimitsException;
import org.thingsboard.server.dao.util.limits.RateLimitService;
import org.thingsboard.server.cache.limits.RateLimitService;
import org.thingsboard.server.exception.ThingsboardErrorResponseHandler;
import org.thingsboard.server.service.security.model.SecurityUser;

2
application/src/main/java/org/thingsboard/server/controller/AuthController.java

@@ -48,7 +48,7 @@ import org.thingsboard.server.common.data.security.event.UserSessionInvalidation
import org.thingsboard.server.common.data.security.model.JwtPair;
import org.thingsboard.server.common.data.security.model.SecuritySettings;
import org.thingsboard.server.common.data.security.model.UserPasswordPolicy;
import org.thingsboard.server.dao.util.limits.RateLimitService;
import org.thingsboard.server.cache.limits.RateLimitService;
import org.thingsboard.server.queue.util.TbCoreComponent;
import org.thingsboard.server.service.security.auth.rest.RestAuthenticationDetails;
import org.thingsboard.server.service.security.model.ActivateUserRequest;

15
application/src/main/java/org/thingsboard/server/controller/BaseController.java

@@ -20,8 +20,6 @@ import com.google.common.util.concurrent.ListenableFuture;
import lombok.Getter;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.postgresql.util.PSQLException;
import org.postgresql.util.ServerErrorMessage;
import org.slf4j.Logger;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
@@ -378,16 +376,13 @@ public abstract class BaseController {
} else if (exception instanceof AsyncRequestTimeoutException) {
return new ThingsboardException("Request timeout", ThingsboardErrorCode.GENERAL);
} else if (exception instanceof DataAccessException) {
Throwable rootCause = ExceptionUtils.getRootCause(exception);
if (rootCause instanceof PSQLException) {
return new ThingsboardException(Optional.ofNullable(((PSQLException) rootCause).getServerErrorMessage())
.map(ServerErrorMessage::getMessage).orElse(rootCause.getMessage()), ThingsboardErrorCode.GENERAL);
} else {
return new ThingsboardException(rootCause, ThingsboardErrorCode.GENERAL);
String errorType = exception.getClass().getSimpleName();
if (!logControllerErrorStackTrace) { // not to log the error twice
log.warn("Database error: {} - {}", errorType, ExceptionUtils.getRootCauseMessage(exception));
}
} else {
return new ThingsboardException(exception.getMessage(), exception, ThingsboardErrorCode.GENERAL);
return new ThingsboardException("Database error", ThingsboardErrorCode.GENERAL);
}
return new ThingsboardException(exception.getMessage(), exception, ThingsboardErrorCode.GENERAL);
}
/**

2
application/src/main/java/org/thingsboard/server/controller/plugin/TbWebSocketHandler.java

@@ -43,7 +43,7 @@ import org.thingsboard.server.common.data.limit.LimitedApi;
import org.thingsboard.server.common.data.tenant.profile.DefaultTenantProfileConfiguration;
import org.thingsboard.server.config.WebSocketConfiguration;
import org.thingsboard.server.dao.tenant.TbTenantProfileCache;
import org.thingsboard.server.dao.util.limits.RateLimitService;
import org.thingsboard.server.cache.limits.RateLimitService;
import org.thingsboard.server.queue.util.TbCoreComponent;
import org.thingsboard.server.service.security.auth.jwt.JwtAuthenticationProvider;
import org.thingsboard.server.service.security.model.SecurityUser;

156
application/src/main/java/org/thingsboard/server/install/ThingsboardInstallService.java

@@ -24,14 +24,12 @@ import org.springframework.context.annotation.Profile;
import org.springframework.stereotype.Service;
import org.thingsboard.server.service.component.ComponentDiscoveryService;
import org.thingsboard.server.service.install.DatabaseEntitiesUpgradeService;
import org.thingsboard.server.service.install.DatabaseTsUpgradeService;
import org.thingsboard.server.service.install.EntityDatabaseSchemaService;
import org.thingsboard.server.service.install.InstallScripts;
import org.thingsboard.server.service.install.NoSqlKeyspaceService;
import org.thingsboard.server.service.install.SystemDataLoaderService;
import org.thingsboard.server.service.install.TsDatabaseSchemaService;
import org.thingsboard.server.service.install.TsLatestDatabaseSchemaService;
import org.thingsboard.server.service.install.migrate.EntitiesMigrateService;
import org.thingsboard.server.service.install.migrate.TsLatestMigrateService;
import org.thingsboard.server.service.install.update.CacheCleanupService;
import org.thingsboard.server.service.install.update.DataUpdateService;
@@ -70,9 +68,6 @@ public class ThingsboardInstallService {
@Autowired
private DatabaseEntitiesUpgradeService databaseEntitiesUpgradeService;
@Autowired(required = false)
private DatabaseTsUpgradeService databaseTsUpgradeService;
@Autowired
private ComponentDiscoveryService componentDiscoveryService;
@@ -88,9 +83,6 @@ public class ThingsboardInstallService {
@Autowired
private CacheCleanupService cacheCleanupService;
@Autowired(required = false)
private EntitiesMigrateService entitiesMigrateService;
@Autowired(required = false)
private TsLatestMigrateService latestMigrateService;
@@ -104,158 +96,13 @@ public class ThingsboardInstallService {
cacheCleanupService.clearCache(upgradeFromVersion);
if ("2.5.0-cassandra".equals(upgradeFromVersion)) {
log.info("Migrating ThingsBoard entities data from cassandra to SQL database ...");
entitiesMigrateService.migrate();
log.info("Updating system data...");
systemDataLoaderService.loadSystemWidgets();
} else if ("3.0.1-cassandra".equals(upgradeFromVersion)) {
if ("cassandra-latest-to-postgres".equals(upgradeFromVersion)) {
log.info("Migrating ThingsBoard latest timeseries data from cassandra to SQL database ...");
latestMigrateService.migrate();
} else if (upgradeFromVersion.equals("3.6.2-images")) {
installScripts.updateImages();
} else {
switch (upgradeFromVersion) {
case "1.2.3": //NOSONAR, Need to execute gradual upgrade starting from upgradeFromVersion
log.info("Upgrading ThingsBoard from version 1.2.3 to 1.3.0 ...");
databaseEntitiesUpgradeService.upgradeDatabase("1.2.3");
case "1.3.0": //NOSONAR, Need to execute gradual upgrade starting from upgradeFromVersion
log.info("Upgrading ThingsBoard from version 1.3.0 to 1.3.1 ...");
databaseEntitiesUpgradeService.upgradeDatabase("1.3.0");
case "1.3.1": //NOSONAR, Need to execute gradual upgrade starting from upgradeFromVersion
log.info("Upgrading ThingsBoard from version 1.3.1 to 1.4.0 ...");
databaseEntitiesUpgradeService.upgradeDatabase("1.3.1");
case "1.4.0":
log.info("Upgrading ThingsBoard from version 1.4.0 to 2.0.0 ...");
databaseEntitiesUpgradeService.upgradeDatabase("1.4.0");
dataUpdateService.updateData("1.4.0");
case "2.0.0":
log.info("Upgrading ThingsBoard from version 2.0.0 to 2.1.1 ...");
databaseEntitiesUpgradeService.upgradeDatabase("2.0.0");
case "2.1.1":
log.info("Upgrading ThingsBoard from version 2.1.1 to 2.1.2 ...");
databaseEntitiesUpgradeService.upgradeDatabase("2.1.1");
case "2.1.3":
log.info("Upgrading ThingsBoard from version 2.1.3 to 2.2.0 ...");
databaseEntitiesUpgradeService.upgradeDatabase("2.1.3");
case "2.3.0":
log.info("Upgrading ThingsBoard from version 2.3.0 to 2.3.1 ...");
databaseEntitiesUpgradeService.upgradeDatabase("2.3.0");
case "2.3.1":
log.info("Upgrading ThingsBoard from version 2.3.1 to 2.4.0 ...");
databaseEntitiesUpgradeService.upgradeDatabase("2.3.1");
case "2.4.0":
log.info("Upgrading ThingsBoard from version 2.4.0 to 2.4.1 ...");
case "2.4.1":
log.info("Upgrading ThingsBoard from version 2.4.1 to 2.4.2 ...");
databaseEntitiesUpgradeService.upgradeDatabase("2.4.1");
case "2.4.2":
log.info("Upgrading ThingsBoard from version 2.4.2 to 2.4.3 ...");
databaseEntitiesUpgradeService.upgradeDatabase("2.4.2");
case "2.4.3":
log.info("Upgrading ThingsBoard from version 2.4.3 to 2.5 ...");
if (databaseTsUpgradeService != null) {
databaseTsUpgradeService.upgradeDatabase("2.4.3");
}
databaseEntitiesUpgradeService.upgradeDatabase("2.4.3");
case "2.5.0":
log.info("Upgrading ThingsBoard from version 2.5.0 to 2.5.1 ...");
if (databaseTsUpgradeService != null) {
databaseTsUpgradeService.upgradeDatabase("2.5.0");
}
case "2.5.1":
log.info("Upgrading ThingsBoard from version 2.5.1 to 3.0.0 ...");
case "3.0.1":
log.info("Upgrading ThingsBoard from version 3.0.1 to 3.1.0 ...");
databaseEntitiesUpgradeService.upgradeDatabase("3.0.1");
dataUpdateService.updateData("3.0.1");
case "3.1.0":
log.info("Upgrading ThingsBoard from version 3.1.0 to 3.1.1 ...");
databaseEntitiesUpgradeService.upgradeDatabase("3.1.0");
case "3.1.1":
log.info("Upgrading ThingsBoard from version 3.1.1 to 3.2.0 ...");
if (databaseTsUpgradeService != null) {
databaseTsUpgradeService.upgradeDatabase("3.1.1");
}
databaseEntitiesUpgradeService.upgradeDatabase("3.1.1");
dataUpdateService.updateData("3.1.1");
systemDataLoaderService.createOAuth2Templates();
case "3.2.0":
log.info("Upgrading ThingsBoard from version 3.2.0 to 3.2.1 ...");
databaseEntitiesUpgradeService.upgradeDatabase("3.2.0");
case "3.2.1":
log.info("Upgrading ThingsBoard from version 3.2.1 to 3.2.2 ...");
if (databaseTsUpgradeService != null) {
databaseTsUpgradeService.upgradeDatabase("3.2.1");
}
databaseEntitiesUpgradeService.upgradeDatabase("3.2.1");
case "3.2.2":
log.info("Upgrading ThingsBoard from version 3.2.2 to 3.3.0 ...");
if (databaseTsUpgradeService != null) {
databaseTsUpgradeService.upgradeDatabase("3.2.2");
}
databaseEntitiesUpgradeService.upgradeDatabase("3.2.2");
dataUpdateService.updateData("3.2.2");
systemDataLoaderService.createOAuth2Templates();
case "3.3.0":
log.info("Upgrading ThingsBoard from version 3.3.0 to 3.3.1 ...");
case "3.3.1":
log.info("Upgrading ThingsBoard from version 3.3.1 to 3.3.2 ...");
case "3.3.2":
log.info("Upgrading ThingsBoard from version 3.3.2 to 3.3.3 ...");
databaseEntitiesUpgradeService.upgradeDatabase("3.3.2");
dataUpdateService.updateData("3.3.2");
case "3.3.3":
log.info("Upgrading ThingsBoard from version 3.3.3 to 3.3.4 ...");
databaseEntitiesUpgradeService.upgradeDatabase("3.3.3");
case "3.3.4":
log.info("Upgrading ThingsBoard from version 3.3.4 to 3.4.0 ...");
databaseEntitiesUpgradeService.upgradeDatabase("3.3.4");
dataUpdateService.updateData("3.3.4");
case "3.4.0":
log.info("Upgrading ThingsBoard from version 3.4.0 to 3.4.1 ...");
databaseEntitiesUpgradeService.upgradeDatabase("3.4.0");
dataUpdateService.updateData("3.4.0");
case "3.4.1":
log.info("Upgrading ThingsBoard from version 3.4.1 to 3.4.2 ...");
databaseEntitiesUpgradeService.upgradeDatabase("3.4.1");
dataUpdateService.updateData("3.4.1");
case "3.4.2":
log.info("Upgrading ThingsBoard from version 3.4.2 to 3.4.3 ...");
case "3.4.3":
log.info("Upgrading ThingsBoard from version 3.4.3 to 3.4.4 ...");
case "3.4.4":
log.info("Upgrading ThingsBoard from version 3.4.4 to 3.5.0 ...");
databaseEntitiesUpgradeService.upgradeDatabase("3.4.4");
if (!getEnv("SKIP_DEFAULT_NOTIFICATION_CONFIGS_CREATION", false)) {
systemDataLoaderService.createDefaultNotificationConfigs();
} else {
log.info("Skipping default notification configs creation");
}
case "3.5.0":
log.info("Upgrading ThingsBoard from version 3.5.0 to 3.5.1 ...");
databaseEntitiesUpgradeService.upgradeDatabase("3.5.0");
@@ -279,6 +126,7 @@ public class ThingsboardInstallService {
case "3.6.2":
log.info("Upgrading ThingsBoard from version 3.6.2 to 3.6.3 ...");
databaseEntitiesUpgradeService.upgradeDatabase("3.6.2");
systemDataLoaderService.updateDefaultNotificationConfigs();
//TODO DON'T FORGET to update switch statement in the CacheCleanupService if you need to clear the cache
break;
default:

4
application/src/main/java/org/thingsboard/server/service/edge/EdgeContextComponent.java

@@ -20,6 +20,7 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Lazy;
import org.springframework.stereotype.Component;
import org.thingsboard.server.cluster.TbClusterService;
import org.thingsboard.server.common.msg.notification.NotificationRuleProcessor;
import org.thingsboard.server.dao.asset.AssetProfileService;
import org.thingsboard.server.dao.asset.AssetService;
import org.thingsboard.server.dao.attributes.AttributesService;
@@ -149,6 +150,9 @@ public class EdgeContextComponent {
@Autowired
private ResourceService resourceService;
@Autowired
private NotificationRuleProcessor notificationRuleProcessor;
@Autowired
private AlarmEdgeProcessor alarmProcessor;

28
application/src/main/java/org/thingsboard/server/service/edge/rpc/EdgeGrpcService.java

@@ -38,6 +38,7 @@ import org.thingsboard.server.common.data.kv.BasicTsKvEntry;
import org.thingsboard.server.common.data.kv.BooleanDataEntry;
import org.thingsboard.server.common.data.kv.LongDataEntry;
import org.thingsboard.server.common.data.msg.TbMsgType;
import org.thingsboard.server.common.data.notification.rule.trigger.EdgeConnectionTrigger;
import org.thingsboard.server.common.msg.TbMsg;
import org.thingsboard.server.common.msg.TbMsgDataType;
import org.thingsboard.server.common.msg.TbMsgMetaData;
@@ -263,7 +264,8 @@ public class EdgeGrpcService extends EdgeRpcServiceGrpc.EdgeRpcServiceImplBase i
}
private void onEdgeConnect(EdgeId edgeId, EdgeGrpcSession edgeGrpcSession) {
TenantId tenantId = edgeGrpcSession.getEdge().getTenantId();
Edge edge = edgeGrpcSession.getEdge();
TenantId tenantId = edge.getTenantId();
log.info("[{}][{}] edge [{}] connected successfully.", tenantId, edgeGrpcSession.getSessionId(), edgeId);
sessions.put(edgeId, edgeGrpcSession);
final Lock newEventLock = sessionNewEventsLocks.computeIfAbsent(edgeId, id -> new ReentrantLock());
@@ -276,7 +278,7 @@ public class EdgeGrpcService extends EdgeRpcServiceGrpc.EdgeRpcServiceImplBase i
save(tenantId, edgeId, DefaultDeviceStateService.ACTIVITY_STATE, true);
long lastConnectTs = System.currentTimeMillis();
save(tenantId, edgeId, DefaultDeviceStateService.LAST_CONNECT_TIME, lastConnectTs);
pushRuleEngineMessage(tenantId, edgeId, lastConnectTs, TbMsgType.CONNECT_EVENT);
pushRuleEngineMessage(tenantId, edge, lastConnectTs, TbMsgType.CONNECT_EVENT);
cancelScheduleEdgeEventsCheck(edgeId);
scheduleEdgeEventsCheck(edgeGrpcSession);
}
@@ -381,7 +383,8 @@ public class EdgeGrpcService extends EdgeRpcServiceGrpc.EdgeRpcServiceImplBase i
}
}
private void onEdgeDisconnect(EdgeId edgeId, UUID sessionId) {
private void onEdgeDisconnect(Edge edge, UUID sessionId) {
EdgeId edgeId = edge.getId();
log.info("[{}][{}] edge disconnected!", edgeId, sessionId);
EdgeGrpcSession toRemove = sessions.get(edgeId);
if (toRemove.getSessionId().equals(sessionId)) {
@@ -397,7 +400,7 @@ public class EdgeGrpcService extends EdgeRpcServiceGrpc.EdgeRpcServiceImplBase i
save(tenantId, edgeId, DefaultDeviceStateService.ACTIVITY_STATE, false);
long lastDisconnectTs = System.currentTimeMillis();
save(tenantId, edgeId, DefaultDeviceStateService.LAST_DISCONNECT_TIME, lastDisconnectTs);
pushRuleEngineMessage(toRemove.getEdge().getTenantId(), edgeId, lastDisconnectTs, TbMsgType.DISCONNECT_EVENT);
pushRuleEngineMessage(toRemove.getEdge().getTenantId(), edge, lastDisconnectTs, TbMsgType.DISCONNECT_EVENT);
cancelScheduleEdgeEventsCheck(edgeId);
} else {
log.debug("[{}] edge session [{}] is not available anymore, nothing to remove. most probably this session is already outdated!", edgeId, sessionId);
@@ -452,25 +455,36 @@ public class EdgeGrpcService extends EdgeRpcServiceGrpc.EdgeRpcServiceImplBase i
}
}
private void pushRuleEngineMessage(TenantId tenantId, EdgeId edgeId, long ts, TbMsgType msgType) {
private void pushRuleEngineMessage(TenantId tenantId, Edge edge, long ts, TbMsgType msgType) {
try {
EdgeId edgeId = edge.getId();
ObjectNode edgeState = JacksonUtil.newObjectNode();
if (msgType.equals(TbMsgType.CONNECT_EVENT)) {
boolean isConnected = TbMsgType.CONNECT_EVENT.equals(msgType);
if (isConnected) {
edgeState.put(DefaultDeviceStateService.ACTIVITY_STATE, true);
edgeState.put(DefaultDeviceStateService.LAST_CONNECT_TIME, ts);
} else {
edgeState.put(DefaultDeviceStateService.ACTIVITY_STATE, false);
edgeState.put(DefaultDeviceStateService.LAST_DISCONNECT_TIME, ts);
}
ctx.getNotificationRuleProcessor().process(EdgeConnectionTrigger.builder()
.tenantId(tenantId)
.customerId(edge.getCustomerId())
.edgeId(edgeId)
.edgeName(edge.getName())
.connected(isConnected).build());
String data = JacksonUtil.toString(edgeState);
TbMsgMetaData md = new TbMsgMetaData();
if (!persistToTelemetry) {
md.putValue(DataConstants.SCOPE, DataConstants.SERVER_SCOPE);
md.putValue("edgeName", edge.getName());
md.putValue("edgeType", edge.getType());
}
TbMsg tbMsg = TbMsg.newMsg(msgType, edgeId, md, TbMsgDataType.JSON, data);
clusterService.pushMsgToRuleEngine(tenantId, edgeId, tbMsg, null);
} catch (Exception e) {
log.warn("[{}][{}] Failed to push {}", tenantId, edgeId, msgType, e);
log.warn("[{}][{}] Failed to push {}", tenantId, edge.getId(), msgType, e);
}
}
}

50
application/src/main/java/org/thingsboard/server/service/edge/rpc/EdgeGrpcSession.java

@@ -35,6 +35,7 @@ import org.thingsboard.server.common.data.kv.AttributeKvEntry;
import org.thingsboard.server.common.data.kv.BaseAttributeKvEntry;
import org.thingsboard.server.common.data.kv.LongDataEntry;
import org.thingsboard.server.common.data.kv.StringDataEntry;
import org.thingsboard.server.common.data.notification.rule.trigger.EdgeCommunicationFailureTrigger;
import org.thingsboard.server.common.data.page.PageData;
import org.thingsboard.server.common.data.page.PageLink;
import org.thingsboard.server.common.data.page.SortOrder;
@@ -111,7 +112,7 @@ public final class EdgeGrpcSession implements Closeable {
private final UUID sessionId;
private final BiConsumer<EdgeId, EdgeGrpcSession> sessionOpenListener;
private final BiConsumer<EdgeId, UUID> sessionCloseListener;
private final BiConsumer<Edge, UUID> sessionCloseListener;
private final EdgeSessionState sessionState = new EdgeSessionState();
@@ -137,7 +138,7 @@
private ScheduledExecutorService sendDownlinkExecutorService;
EdgeGrpcSession(EdgeContextComponent ctx, StreamObserver<ResponseMsg> outputStream, BiConsumer<EdgeId, EdgeGrpcSession> sessionOpenListener,
BiConsumer<EdgeId, UUID> sessionCloseListener, ScheduledExecutorService sendDownlinkExecutorService, int maxInboundMessageSize) {
BiConsumer<Edge, UUID> sessionCloseListener, ScheduledExecutorService sendDownlinkExecutorService, int maxInboundMessageSize) {
this.sessionId = UUID.randomUUID();
this.ctx = ctx;
this.outputStream = outputStream;
@@ -206,7 +207,7 @@
connected = false;
if (edge != null) {
try {
sessionCloseListener.accept(edge.getId(), sessionId);
sessionCloseListener.accept(edge, sessionId);
} catch (Exception ignored) {
}
}
@@ -314,7 +315,7 @@
} catch (Exception e) {
log.error("[{}][{}] Failed to send downlink message [{}]", this.tenantId, this.sessionId, downlinkMsg, e);
connected = false;
sessionCloseListener.accept(edge.getId(), sessionId);
sessionCloseListener.accept(edge, sessionId);
} finally {
downlinkMsgLock.unlock();
}
@@ -466,15 +467,26 @@
if (isConnected() && sessionState.getPendingMsgsMap().values().size() > 0) {
List<DownlinkMsg> copy = new ArrayList<>(sessionState.getPendingMsgsMap().values());
if (attempt > 1) {
log.warn("[{}][{}] Failed to deliver the batch: {}, attempt: {}", this.tenantId, this.sessionId, copy, attempt);
String error = "Failed to deliver the batch";
String failureMsg = String.format("{%s}: {%s}", error, copy);
if (attempt == 2) {
// Send a failure notification only on the second attempt.
// This ensures that failure alerts are sent just once to avoid redundant notifications.
ctx.getNotificationRuleProcessor().process(EdgeCommunicationFailureTrigger.builder().tenantId(tenantId)
.edgeId(edge.getId()).customerId(edge.getCustomerId()).edgeName(edge.getName()).failureMsg(failureMsg).error(error).build());
}
log.warn("[{}][{}] {}, attempt: {}", this.tenantId, this.sessionId, failureMsg, attempt);
}
log.trace("[{}][{}][{}] downlink msg(s) are going to be send.", this.tenantId, this.sessionId, copy.size());
for (DownlinkMsg downlinkMsg : copy) {
if (this.clientMaxInboundMessageSize != 0 && downlinkMsg.getSerializedSize() > this.clientMaxInboundMessageSize) {
log.error("[{}][{}][{}] Downlink msg size [{}] exceeds client max inbound message size [{}]. Skipping this message. " +
"Please increase value of CLOUD_RPC_MAX_INBOUND_MESSAGE_SIZE env variable on the edge and restart it." +
"Message {}", this.tenantId, edge.getId(), this.sessionId, downlinkMsg.getSerializedSize(),
this.clientMaxInboundMessageSize, downlinkMsg);
String error = String.format("Client max inbound message size [{%s}] is exceeded. Please increase value of CLOUD_RPC_MAX_INBOUND_MESSAGE_SIZE " +
"env variable on the edge and restart it.", this.clientMaxInboundMessageSize);
String message = String.format("Downlink msg size [{%s}] exceeds client max inbound message size [{%s}]. " +
"Please increase value of CLOUD_RPC_MAX_INBOUND_MESSAGE_SIZE env variable on the edge and restart it.", downlinkMsg.getSerializedSize(), this.clientMaxInboundMessageSize);
log.error("[{}][{}][{}] {} Message {}", this.tenantId, edge.getId(), this.sessionId, message, downlinkMsg);
ctx.getNotificationRuleProcessor().process(EdgeCommunicationFailureTrigger.builder().tenantId(tenantId)
.edgeId(edge.getId()).customerId(edge.getCustomerId()).edgeName(edge.getName()).failureMsg(message).error(error).build());
sessionState.getPendingMsgsMap().remove(downlinkMsg.getDownlinkMsgId());
} else {
sendDownlinkMsg(ResponseMsg.newBuilder()
@@ -485,8 +497,12 @@
if (attempt < MAX_DOWNLINK_ATTEMPTS) {
scheduleDownlinkMsgsPackSend(attempt + 1);
} else {
String failureMsg = String.format("Failed to deliver messages: %s", copy);
log.warn("[{}][{}] Failed to deliver the batch after {} attempts. Next messages are going to be discarded {}",
this.tenantId, this.sessionId, MAX_DOWNLINK_ATTEMPTS, copy);
ctx.getNotificationRuleProcessor().process(EdgeCommunicationFailureTrigger.builder().tenantId(tenantId).edgeId(edge.getId())
.customerId(edge.getCustomerId()).edgeName(edge.getName()).failureMsg(failureMsg)
.error("Failed to deliver messages after " + MAX_DOWNLINK_ATTEMPTS + " attempts").build());
stopCurrentSendDownlinkMsgsTask(false);
}
} else {
@@ -791,7 +807,10 @@
}
}
} catch (Exception e) {
String failureMsg = String.format("Can't process uplink msg [%s] from edge", uplinkMsg);
log.error("[{}][{}] Can't process uplink msg [{}]", this.tenantId, this.sessionId, uplinkMsg, e);
ctx.getNotificationRuleProcessor().process(EdgeCommunicationFailureTrigger.builder().tenantId(tenantId).edgeId(edge.getId())
.customerId(edge.getCustomerId()).edgeName(edge.getName()).failureMsg(failureMsg).error(e.getMessage()).build());
return Futures.immediateFailedFuture(e);
}
return Futures.allAsList(result);
@@ -815,15 +834,22 @@
.setMaxInboundMessageSize(maxInboundMessageSize)
.build();
}
String error = "Failed to validate the edge!";
String failureMsg = String.format("{%s} Provided request secret: %s", error, request.getEdgeSecret());
ctx.getNotificationRuleProcessor().process(EdgeCommunicationFailureTrigger.builder().tenantId(tenantId).edgeId(edge.getId())
.customerId(edge.getCustomerId()).edgeName(edge.getName()).failureMsg(failureMsg).error(error).build());
return ConnectResponseMsg.newBuilder()
.setResponseCode(ConnectResponseCode.BAD_CREDENTIALS)
.setErrorMsg("Failed to validate the edge!")
.setErrorMsg(failureMsg)
.setConfiguration(EdgeConfiguration.getDefaultInstance()).build();
} catch (Exception e) {
log.error("[{}] Failed to process edge connection!", request.getEdgeRoutingKey(), e);
String failureMsg = "Failed to process edge connection!";
ctx.getNotificationRuleProcessor().process(EdgeCommunicationFailureTrigger.builder().tenantId(tenantId).edgeId(edge.getId())
.customerId(edge.getCustomerId()).edgeName(edge.getName()).failureMsg(failureMsg).error(e.getMessage()).build());
log.error(failureMsg, e);
return ConnectResponseMsg.newBuilder()
.setResponseCode(ConnectResponseCode.SERVER_UNAVAILABLE)
.setErrorMsg("Failed to process edge connection!")
.setErrorMsg(failureMsg)
.setConfiguration(EdgeConfiguration.getDefaultInstance()).build();
}
}

2
application/src/main/java/org/thingsboard/server/service/edge/rpc/processor/alarm/BaseAlarmProcessor.java

@@ -113,7 +113,7 @@ public abstract class BaseAlarmProcessor extends BaseEdgeProcessor {
}
switch (alarmCommentUpdateMsg.getMsgType()) {
case ENTITY_CREATED_RPC_MESSAGE:
alarmCommentDao.createAlarmComment(tenantId, alarmComment);
alarmCommentDao.save(tenantId, alarmComment);
break;
case ENTITY_UPDATED_RPC_MESSAGE:
alarmCommentService.createOrUpdateAlarmComment(tenantId, alarmComment);

5
application/src/main/java/org/thingsboard/server/service/edge/rpc/processor/rule/RuleChainEdgeProcessor.java

@@ -18,6 +18,7 @@ package org.thingsboard.server.service.edge.rpc.processor.rule;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Component;
import org.thingsboard.server.common.data.EdgeUtils;
import org.thingsboard.server.common.data.edge.Edge;
import org.thingsboard.server.common.data.edge.EdgeEvent;
import org.thingsboard.server.common.data.id.RuleChainId;
import org.thingsboard.server.common.data.rule.RuleChain;
@@ -53,6 +54,10 @@ public class RuleChainEdgeProcessor extends BaseEdgeProcessor {
isRoot = Boolean.parseBoolean(edgeEvent.getBody().get(EDGE_IS_ROOT_BODY_KEY).asText());
} catch (Exception ignored) {}
}
if (!isRoot) {
Edge edge = edgeService.findEdgeById(edgeEvent.getTenantId(), edgeEvent.getEdgeId());
isRoot = edge.getRootRuleChainId().equals(ruleChainId);
}
UpdateMsgType msgType = getUpdateMsgType(edgeEvent.getAction());
RuleChainUpdateMsg ruleChainUpdateMsg = ((RuleChainMsgConstructor)
ruleChainMsgConstructorFactory.getMsgConstructorByEdgeVersion(edgeVersion))

115
application/src/main/java/org/thingsboard/server/service/entitiy/queue/DefaultTbQueueService.java

@@ -50,22 +50,15 @@ public class DefaultTbQueueService extends AbstractTbEntityService implements Tb
public Queue saveQueue(Queue queue) {
boolean create = queue.getId() == null;
Queue oldQueue;
if (create) {
oldQueue = null;
} else {
oldQueue = queueService.findQueueById(queue.getTenantId(), queue.getId());
}
//TODO: add checkNotNull
Queue savedQueue = queueService.saveQueue(queue);
if (create) {
onQueueCreated(savedQueue);
} else {
onQueueUpdated(savedQueue, oldQueue);
}
createTopicsIfNeeded(savedQueue, oldQueue);
tbClusterService.onQueuesUpdate(List.of(savedQueue));
return savedQueue;
}
@@ -73,54 +66,14 @@ public class DefaultTbQueueService extends AbstractTbEntityService implements Tb
public void deleteQueue(TenantId tenantId, QueueId queueId) {
Queue queue = queueService.findQueueById(tenantId, queueId);
queueService.deleteQueue(tenantId, queueId);
onQueueDeleted(queue);
tbClusterService.onQueuesDelete(List.of(queue));
}
@Override
public void deleteQueueByQueueName(TenantId tenantId, String queueName) {
Queue queue = queueService.findQueueByTenantIdAndNameInternal(tenantId, queueName);
queueService.deleteQueue(tenantId, queue.getId());
onQueueDeleted(queue);
}
private void onQueueCreated(Queue queue) {
for (int i = 0; i < queue.getPartitions(); i++) {
tbQueueAdmin.createTopicIfNotExists(
new TopicPartitionInfo(queue.getTopic(), queue.getTenantId(), i, false).getFullTopicName(),
queue.getCustomProperties()
);
}
tbClusterService.onQueueChange(queue);
}
private void onQueueUpdated(Queue queue, Queue oldQueue) {
int oldPartitions = oldQueue.getPartitions();
int currentPartitions = queue.getPartitions();
if (currentPartitions != oldPartitions) {
if (currentPartitions > oldPartitions) {
log.info("Added [{}] new partitions to [{}] queue", currentPartitions - oldPartitions, queue.getName());
for (int i = oldPartitions; i < currentPartitions; i++) {
tbQueueAdmin.createTopicIfNotExists(
new TopicPartitionInfo(queue.getTopic(), queue.getTenantId(), i, false).getFullTopicName(),
queue.getCustomProperties()
);
}
tbClusterService.onQueueChange(queue);
} else {
log.info("Removed [{}] partitions from [{}] queue", oldPartitions - currentPartitions, queue.getName());
tbClusterService.onQueueChange(queue);
// TODO: move all the messages left in old partitions and delete topics
}
} else if (!oldQueue.equals(queue)) {
tbClusterService.onQueueChange(queue);
}
}
private void onQueueDeleted(Queue queue) {
tbClusterService.onQueueDelete(queue);
// queueStatsService.deleteQueueStatsByQueueId(tenantId, queueId);
tbClusterService.onQueuesDelete(List.of(queue));
}
@Override
@ -176,26 +129,56 @@ public class DefaultTbQueueService extends AbstractTbEntityService implements Tb
log.debug("[{}] Handling profile queue config update: creating queues {}, updating {}, deleting {}. Affected tenants: {}",
newTenantProfile.getUuidId(), toCreate, toUpdate, toRemove, tenantIds);
}
tenantIds.forEach(tenantId -> {
toCreate.forEach(key -> saveQueue(new Queue(tenantId, newQueues.get(key))));
toUpdate.forEach(key -> {
Queue queueToUpdate = new Queue(tenantId, newQueues.get(key));
Queue foundQueue = queueService.findQueueByTenantIdAndName(tenantId, key);
queueToUpdate.setId(foundQueue.getId());
queueToUpdate.setCreatedTime(foundQueue.getCreatedTime());
List<Queue> updated = new ArrayList<>();
List<Queue> deleted = new ArrayList<>();
for (TenantId tenantId : tenantIds) {
for (String name : toCreate) {
updated.add(new Queue(tenantId, newQueues.get(name)));
}
if (!queueToUpdate.equals(foundQueue)) {
saveQueue(queueToUpdate);
for (String name : toUpdate) {
Queue queue = new Queue(tenantId, newQueues.get(name));
Queue foundQueue = queueService.findQueueByTenantIdAndName(tenantId, name);
if (foundQueue != null) {
queue.setId(foundQueue.getId());
queue.setCreatedTime(foundQueue.getCreatedTime());
}
});
if (!queue.equals(foundQueue)) {
updated.add(queue);
createTopicsIfNeeded(queue, foundQueue);
}
}
for (String name : toRemove) {
Queue queue = queueService.findQueueByTenantIdAndNameInternal(tenantId, name);
deleted.add(queue);
}
}
toRemove.forEach(q -> {
Queue queue = queueService.findQueueByTenantIdAndNameInternal(tenantId, q);
QueueId queueIdForRemove = queue.getId();
deleteQueue(tenantId, queueIdForRemove);
if (!updated.isEmpty()) {
updated = updated.stream()
.map(queueService::saveQueue)
.collect(Collectors.toList());
tbClusterService.onQueuesUpdate(updated);
}
if (!deleted.isEmpty()) {
deleted.forEach(queue -> {
queueService.deleteQueue(queue.getTenantId(), queue.getId());
});
});
tbClusterService.onQueuesDelete(deleted);
}
}
/**
 * Creates backing topics for every partition the queue gained relative to its
 * previous state. A null {@code oldQueue} means the queue is brand new, so
 * topics are created for all of its partitions.
 */
private void createTopicsIfNeeded(Queue queue, Queue oldQueue) {
    int from = (oldQueue == null) ? 0 : oldQueue.getPartitions();
    int to = queue.getPartitions();
    for (int partition = from; partition < to; partition++) {
        String topicName = new TopicPartitionInfo(queue.getTopic(), queue.getTenantId(), partition, false)
                .getFullTopicName();
        tbQueueAdmin.createTopicIfNotExists(topicName, queue.getCustomProperties());
    }
}
}

24
application/src/main/java/org/thingsboard/server/service/install/CassandraTsDatabaseUpgradeService.java

@ -15,7 +15,6 @@
*/
package org.thingsboard.server.service.install;
import com.datastax.oss.driver.api.core.servererrors.InvalidQueryException;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.annotation.Profile;
import org.springframework.stereotype.Service;
@ -30,29 +29,6 @@ public class CassandraTsDatabaseUpgradeService extends AbstractCassandraDatabase
@Override
public void upgradeDatabase(String fromVersion) throws Exception {
switch (fromVersion) {
case "2.4.3":
log.info("Updating schema ...");
String updateTsKvTableStmt = "alter table ts_kv_cf add json_v text";
String updateTsKvLatestTableStmt = "alter table ts_kv_latest_cf add json_v text";
try {
log.info("Updating ts ...");
cluster.getSession().execute(updateTsKvTableStmt);
Thread.sleep(2500);
log.info("Ts updated.");
log.info("Updating ts latest ...");
cluster.getSession().execute(updateTsKvLatestTableStmt);
Thread.sleep(2500);
log.info("Ts latest updated.");
} catch (InvalidQueryException e) {
}
log.info("Schema updated.");
break;
case "2.5.0":
case "3.1.1":
case "3.2.1":
case "3.2.2":
break;
default:
throw new RuntimeException("Unable to upgrade Cassandra database, unsupported fromVersion: " + fromVersion);
}

114
application/src/main/java/org/thingsboard/server/service/install/DatabaseHelper.java

@@ -1,114 +0,0 @@
/**
 * Copyright © 2016-2024 The Thingsboard Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.thingsboard.server.service.install;
import com.fasterxml.jackson.databind.JavaType;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.thingsboard.common.util.JacksonUtil;
import org.thingsboard.server.common.data.ShortCustomerInfo;
import org.thingsboard.server.common.data.StringUtils;
import org.thingsboard.server.common.data.UUIDConverter;
import org.thingsboard.server.common.data.id.CustomerId;
import org.thingsboard.server.common.data.id.DashboardId;
import org.thingsboard.server.common.data.id.TenantId;
import org.thingsboard.server.dao.dashboard.DashboardService;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.UUID;
/**
 * Created by igor on 2/27/18.
 *
 * Column-name constants and CSV helpers shared by the database upgrade scripts
 * that dump tables to CSV and restore them after a schema change.
 */
@Slf4j
public class DatabaseHelper {
    // CSV dump format: "\N" marks SQL NULLs, matching the dump produced by SqlDbHelper.
    public static final CSVFormat CSV_DUMP_FORMAT = CSVFormat.DEFAULT.withNullString("\\N");
    public static final String DEVICE = "device";
    public static final String ENTITY_ID = "entity_id";
    public static final String TENANT_ID = "tenant_id";
    public static final String ENTITY_TYPE = "entity_type";
    public static final String CUSTOMER_ID = "customer_id";
    public static final String SEARCH_TEXT = "search_text";
    public static final String ADDITIONAL_INFO = "additional_info";
    public static final String ASSET = "asset";
    public static final String DASHBOARD = "dashboard";
    public static final String ENTITY_VIEWS = "entity_views";
    public static final String ENTITY_VIEW = "entity_view";
    public static final String RULE_CHAIN = "rule_chain";
    public static final String ID = "id";
    public static final String TITLE = "title";
    public static final String TYPE = "type";
    public static final String NAME = "name";
    public static final String KEYS = "keys";
    public static final String START_TS = "start_ts";
    public static final String END_TS = "end_ts";
    public static final String ASSIGNED_CUSTOMERS = "assigned_customers";
    public static final String CONFIGURATION = "configuration";
    /**
     * Re-creates dashboard-to-customer assignments from a pre-4.0 dashboards dump.
     * For each CSV row, customer ids are collected from the JSON "assigned_customers"
     * column (ignoring null UUIDs, logging unparseable values) plus the legacy
     * single "customer_id" column, and each one is assigned via the dashboard service.
     *
     * @param dashboardsDump   path to the CSV dump of the dashboard table
     * @param dashboardService service used to persist the assignments
     * @param sql              true when ids are in the SQL string encoding, false for plain UUIDs
     */
    public static void upgradeTo40_assignDashboards(Path dashboardsDump, DashboardService dashboardService, boolean sql) throws Exception {
        JavaType assignedCustomersType =
                JacksonUtil.constructCollectionType(HashSet.class, ShortCustomerInfo.class);
        try (CSVParser csvParser = new CSVParser(Files.newBufferedReader(dashboardsDump), CSV_DUMP_FORMAT.withFirstRecordAsHeader())) {
            for (var record : csvParser) {
                DashboardId dashboardId = new DashboardId(toUUID(record.get(ID), sql));
                List<CustomerId> customerIds = new ArrayList<>();
                String assignedCustomersValue = record.get(ASSIGNED_CUSTOMERS);
                if (!StringUtils.isEmpty(assignedCustomersValue)) {
                    try {
                        Set<ShortCustomerInfo> assignedCustomers = JacksonUtil.fromString(assignedCustomersValue, assignedCustomersType);
                        for (ShortCustomerInfo customerInfo : assignedCustomers) {
                            CustomerId customerId = customerInfo.getCustomerId();
                            if (!customerId.isNullUid()) {
                                customerIds.add(customerId);
                            }
                        }
                    } catch (IllegalArgumentException e) {
                        // Keep going: a single malformed row must not abort the whole upgrade.
                        log.error("Unable to parse assigned customers field", e);
                    }
                }
                String customerIdValue = record.get(CUSTOMER_ID);
                if (!StringUtils.isEmpty(customerIdValue)) {
                    CustomerId customerId = new CustomerId(toUUID(customerIdValue, sql));
                    if (!customerId.isNullUid()) {
                        customerIds.add(customerId);
                    }
                }
                for (CustomerId customerId : customerIds) {
                    dashboardService.assignDashboardToCustomer(TenantId.SYS_TENANT_ID, dashboardId, customerId);
                }
            }
        }
    }
    // SQL dumps store UUIDs in a compact string form that needs UUIDConverter;
    // Cassandra dumps use the canonical UUID representation.
    private static UUID toUUID(String src, boolean sql) {
        return sql ? UUIDConverter.fromString(src) : UUID.fromString(src);
    }
}

21
application/src/main/java/org/thingsboard/server/service/install/DefaultSystemDataLoaderService.java

@ -297,11 +297,6 @@ public class DefaultSystemDataLoaderService implements SystemDataLoaderService {
jwtSettingsService.createRandomJwtSettings();
}
@Override
// Thin delegate: forwards directly to jwtSettingsService.saveLegacyYmlSettings().
// Presumably persists JWT settings still configured via the legacy yml file — confirm in JwtSettingsService.
public void saveLegacyYmlSettings() throws Exception {
    jwtSettingsService.saveLegacyYmlSettings();
}
@Override
public void createOAuth2Templates() throws Exception {
installScripts.createOAuth2Templates();
@ -696,7 +691,23 @@ public class DefaultSystemDataLoaderService implements SystemDataLoaderService {
}
@Override
@SneakyThrows
// Fans out notification-config updates across all tenants on a fixed thread pool,
// then updates the system tenant last. Logs progress every 500 tenants.
public void updateDefaultNotificationConfigs() {
    PageDataIterable<TenantId> tenants = new PageDataIterable<>(tenantService::findTenantsIds, 500);
    ExecutorService executor = Executors.newFixedThreadPool(Math.max(Runtime.getRuntime().availableProcessors(), 4));
    log.info("Updating default edge failure notification configs for all tenants");
    AtomicInteger count = new AtomicInteger();
    try {
        for (TenantId tenantId : tenants) {
            executor.submit(() -> {
                try {
                    notificationSettingsService.updateDefaultNotificationConfigs(tenantId);
                } catch (Exception e) {
                    // Previously failures vanished silently (the Future returned by submit()
                    // was discarded); log them so a partially failed migration is visible.
                    log.error("Failed to update default notification configs for tenant {}", tenantId, e);
                }
                int n = count.incrementAndGet();
                if (n % 500 == 0) {
                    log.info("{} tenants processed", n);
                }
            });
        }
    } finally {
        // Always release the pool, even if tenant iteration itself throws.
        executor.shutdown();
    }
    executor.awaitTermination(Integer.MAX_VALUE, TimeUnit.SECONDS);
    notificationSettingsService.updateDefaultNotificationConfigs(TenantId.SYS_TENANT_ID);
}

691
application/src/main/java/org/thingsboard/server/service/install/SqlDatabaseUpgradeService.java

@ -15,37 +15,11 @@
*/
package org.thingsboard.server.service.install;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections.CollectionUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Lazy;
import org.springframework.context.annotation.Profile;
import org.springframework.stereotype.Service;
import org.thingsboard.server.common.data.EntitySubtype;
import org.thingsboard.server.common.data.Tenant;
import org.thingsboard.server.common.data.id.TenantId;
import org.thingsboard.server.common.data.page.PageData;
import org.thingsboard.server.common.data.page.PageLink;
import org.thingsboard.server.common.data.queue.ProcessingStrategy;
import org.thingsboard.server.common.data.queue.ProcessingStrategyType;
import org.thingsboard.server.common.data.queue.Queue;
import org.thingsboard.server.common.data.queue.SubmitStrategy;
import org.thingsboard.server.common.data.queue.SubmitStrategyType;
import org.thingsboard.server.common.data.util.TbPair;
import org.thingsboard.server.dao.asset.AssetDao;
import org.thingsboard.server.dao.asset.AssetProfileService;
import org.thingsboard.server.dao.dashboard.DashboardService;
import org.thingsboard.server.dao.device.DeviceProfileService;
import org.thingsboard.server.dao.device.DeviceService;
import org.thingsboard.server.dao.queue.QueueService;
import org.thingsboard.server.dao.sql.tenant.TenantRepository;
import org.thingsboard.server.dao.tenant.TenantService;
import org.thingsboard.server.dao.usagerecord.ApiUsageStateService;
import org.thingsboard.server.queue.settings.TbRuleEngineQueueConfiguration;
import org.thingsboard.server.service.install.sql.SqlDbHelper;
import org.thingsboard.server.service.install.update.DefaultDataUpdateService;
import java.nio.charset.Charset;
@ -56,34 +30,11 @@ import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLSyntaxErrorException;
import java.sql.SQLWarning;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import static org.thingsboard.server.service.install.DatabaseHelper.ADDITIONAL_INFO;
import static org.thingsboard.server.service.install.DatabaseHelper.ASSIGNED_CUSTOMERS;
import static org.thingsboard.server.service.install.DatabaseHelper.CONFIGURATION;
import static org.thingsboard.server.service.install.DatabaseHelper.CUSTOMER_ID;
import static org.thingsboard.server.service.install.DatabaseHelper.DASHBOARD;
import static org.thingsboard.server.service.install.DatabaseHelper.END_TS;
import static org.thingsboard.server.service.install.DatabaseHelper.ENTITY_ID;
import static org.thingsboard.server.service.install.DatabaseHelper.ENTITY_TYPE;
import static org.thingsboard.server.service.install.DatabaseHelper.ENTITY_VIEW;
import static org.thingsboard.server.service.install.DatabaseHelper.ENTITY_VIEWS;
import static org.thingsboard.server.service.install.DatabaseHelper.ID;
import static org.thingsboard.server.service.install.DatabaseHelper.KEYS;
import static org.thingsboard.server.service.install.DatabaseHelper.NAME;
import static org.thingsboard.server.service.install.DatabaseHelper.SEARCH_TEXT;
import static org.thingsboard.server.service.install.DatabaseHelper.START_TS;
import static org.thingsboard.server.service.install.DatabaseHelper.TENANT_ID;
import static org.thingsboard.server.service.install.DatabaseHelper.TITLE;
import static org.thingsboard.server.service.install.DatabaseHelper.TYPE;
@Service
@Profile("install")
@Slf4j
@ -100,623 +51,12 @@ public class SqlDatabaseUpgradeService implements DatabaseEntitiesUpgradeService
@Value("${spring.datasource.password}")
private String dbPassword;
@Autowired
private DashboardService dashboardService;
@Autowired
private InstallScripts installScripts;
@Autowired
private SystemDataLoaderService systemDataLoaderService;
@Autowired
private TenantService tenantService;
@Autowired
private TenantRepository tenantRepository;
@Autowired
private DeviceService deviceService;
@Autowired
private AssetDao assetDao;
@Autowired
private DeviceProfileService deviceProfileService;
@Autowired
private AssetProfileService assetProfileService;
@Autowired
private ApiUsageStateService apiUsageStateService;
@Lazy
@Autowired
private QueueService queueService;
@Autowired
private TbRuleEngineQueueConfigService queueConfig;
@Autowired
private DbUpgradeExecutorService dbUpgradeExecutor;
@Override
public void upgradeDatabase(String fromVersion) throws Exception {
switch (fromVersion) {
case "1.3.0":
log.info("Updating schema ...");
Path schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "1.3.1", SCHEMA_UPDATE_SQL);
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
loadSql(schemaUpdateFile, conn);
}
log.info("Schema updated.");
break;
case "1.3.1":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Dumping dashboards ...");
Path dashboardsDump = SqlDbHelper.dumpTableIfExists(conn, DASHBOARD,
new String[]{ID, TENANT_ID, CUSTOMER_ID, TITLE, SEARCH_TEXT, ASSIGNED_CUSTOMERS, CONFIGURATION},
new String[]{"", "", "", "", "", "", ""},
"tb-dashboards", true);
log.info("Dashboards dumped.");
log.info("Updating schema ...");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "1.4.0", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
log.info("Schema updated.");
log.info("Restoring dashboards ...");
if (dashboardsDump != null) {
SqlDbHelper.loadTable(conn, DASHBOARD,
new String[]{ID, TENANT_ID, TITLE, SEARCH_TEXT, CONFIGURATION}, dashboardsDump, true);
DatabaseHelper.upgradeTo40_assignDashboards(dashboardsDump, dashboardService, true);
Files.deleteIfExists(dashboardsDump);
}
log.info("Dashboards restored.");
}
break;
case "1.4.0":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "2.0.0", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
log.info("Schema updated.");
}
break;
case "2.0.0":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "2.1.1", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
log.info("Schema updated.");
}
break;
case "2.1.1":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Dumping entity views ...");
Path entityViewsDump = SqlDbHelper.dumpTableIfExists(conn, ENTITY_VIEWS,
new String[]{ID, ENTITY_ID, ENTITY_TYPE, TENANT_ID, CUSTOMER_ID, TYPE, NAME, KEYS, START_TS, END_TS, SEARCH_TEXT, ADDITIONAL_INFO},
new String[]{"", "", "", "", "", "default", "", "", "0", "0", "", ""},
"tb-entity-views", true);
log.info("Entity views dumped.");
log.info("Updating schema ...");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "2.1.2", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
log.info("Schema updated.");
log.info("Restoring entity views ...");
if (entityViewsDump != null) {
SqlDbHelper.loadTable(conn, ENTITY_VIEW,
new String[]{ID, ENTITY_ID, ENTITY_TYPE, TENANT_ID, CUSTOMER_ID, TYPE, NAME, KEYS, START_TS, END_TS, SEARCH_TEXT, ADDITIONAL_INFO}, entityViewsDump, true);
Files.deleteIfExists(entityViewsDump);
}
log.info("Entity views restored.");
}
break;
case "2.1.3":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "2.2.0", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
log.info("Schema updated.");
}
break;
case "2.3.0":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "2.3.1", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
log.info("Schema updated.");
}
break;
case "2.3.1":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "2.4.0", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
try {
conn.createStatement().execute("ALTER TABLE device ADD COLUMN label varchar(255)"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception e) {
}
log.info("Schema updated.");
}
break;
case "2.4.1":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
try {
conn.createStatement().execute("ALTER TABLE asset ADD COLUMN label varchar(255)"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception e) {
}
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "2.4.2", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
try {
conn.createStatement().execute("ALTER TABLE device ADD CONSTRAINT device_name_unq_key UNIQUE (tenant_id, name)"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception e) {
}
try {
conn.createStatement().execute("ALTER TABLE device_credentials ADD CONSTRAINT device_credentials_id_unq_key UNIQUE (credentials_id)"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception e) {
}
try {
conn.createStatement().execute("ALTER TABLE asset ADD CONSTRAINT asset_name_unq_key UNIQUE (tenant_id, name)"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception e) {
}
log.info("Schema updated.");
}
break;
case "2.4.2":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
try {
conn.createStatement().execute("ALTER TABLE alarm ADD COLUMN propagate_relation_types varchar"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception e) {
}
log.info("Schema updated.");
}
break;
case "2.4.3":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
try {
conn.createStatement().execute("ALTER TABLE attribute_kv ADD COLUMN json_v json;");
} catch (Exception e) {
if (e instanceof SQLSyntaxErrorException) {
try {
conn.createStatement().execute("ALTER TABLE attribute_kv ADD COLUMN json_v varchar(10000000);");
} catch (Exception e1) {
}
}
}
try {
conn.createStatement().execute("ALTER TABLE tenant ADD COLUMN isolated_tb_core boolean DEFAULT (false), ADD COLUMN isolated_tb_rule_engine boolean DEFAULT (false)");
} catch (Exception e) {
}
try {
long ts = System.currentTimeMillis();
conn.createStatement().execute("ALTER TABLE event ADD COLUMN ts bigint DEFAULT " + ts + ";"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception e) {
}
log.info("Schema updated.");
}
break;
case "3.0.1":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
if (isOldSchema(conn, 3000001)) {
String[] tables = new String[]{"admin_settings", "alarm", "asset", "audit_log", "attribute_kv",
"component_descriptor", "customer", "dashboard", "device", "device_credentials", "event",
"relation", "tb_user", "tenant", "user_credentials", "widget_type", "widgets_bundle",
"rule_chain", "rule_node", "entity_view"};
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.0.1", "schema_update_to_uuid.sql");
loadSql(schemaUpdateFile, conn);
conn.createStatement().execute("call drop_all_idx()");
log.info("Optimizing alarm relations...");
conn.createStatement().execute("DELETE from relation WHERE relation_type_group = 'ALARM' AND relation_type <> 'ALARM_ANY';");
conn.createStatement().execute("DELETE from relation WHERE relation_type_group = 'ALARM' AND relation_type = 'ALARM_ANY' " +
"AND exists(SELECT * FROM alarm WHERE alarm.id = relation.to_id AND alarm.originator_id = relation.from_id)");
log.info("Alarm relations optimized.");
for (String table : tables) {
log.info("Updating table {}.", table);
Statement statement = conn.createStatement();
statement.execute("call update_" + table + "();");
SQLWarning warnings = statement.getWarnings();
if (warnings != null) {
log.info("{}", warnings.getMessage());
SQLWarning nextWarning = warnings.getNextWarning();
while (nextWarning != null) {
log.info("{}", nextWarning.getMessage());
nextWarning = nextWarning.getNextWarning();
}
}
conn.createStatement().execute("DROP PROCEDURE update_" + table);
log.info("Table {} updated.", table);
}
conn.createStatement().execute("call create_all_idx()");
conn.createStatement().execute("DROP PROCEDURE drop_all_idx");
conn.createStatement().execute("DROP PROCEDURE create_all_idx");
conn.createStatement().execute("DROP FUNCTION column_type_to_uuid");
log.info("Updating alarm relations...");
conn.createStatement().execute("UPDATE relation SET relation_type = 'ANY' WHERE relation_type_group = 'ALARM' AND relation_type = 'ALARM_ANY';");
log.info("Alarm relations updated.");
conn.createStatement().execute("UPDATE tb_schema_settings SET schema_version = 3001000;");
conn.createStatement().execute("VACUUM FULL");
}
log.info("Schema updated.");
} catch (Exception e) {
log.error("Failed updating schema!!!", e);
}
break;
case "3.1.0":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.1.0", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
log.info("Schema updated.");
}
break;
case "3.1.1":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
if (isOldSchema(conn, 3001000)) {
try {
conn.createStatement().execute("ALTER TABLE device ADD COLUMN device_profile_id uuid, ADD COLUMN device_data jsonb");
} catch (Exception e) {
}
try {
conn.createStatement().execute("ALTER TABLE tenant ADD COLUMN tenant_profile_id uuid");
} catch (Exception e) {
}
try {
conn.createStatement().execute("CREATE TABLE IF NOT EXISTS rule_node_state (" +
" id uuid NOT NULL CONSTRAINT rule_node_state_pkey PRIMARY KEY," +
" created_time bigint NOT NULL," +
" rule_node_id uuid NOT NULL," +
" entity_type varchar(32) NOT NULL," +
" entity_id uuid NOT NULL," +
" state_data varchar(16384) NOT NULL," +
" CONSTRAINT rule_node_state_unq_key UNIQUE (rule_node_id, entity_id)," +
" CONSTRAINT fk_rule_node_state_node_id FOREIGN KEY (rule_node_id) REFERENCES rule_node(id) ON DELETE CASCADE)");
} catch (Exception e) {
}
try {
conn.createStatement().execute("CREATE TABLE IF NOT EXISTS api_usage_state (" +
" id uuid NOT NULL CONSTRAINT usage_record_pkey PRIMARY KEY," +
" created_time bigint NOT NULL," +
" tenant_id uuid," +
" entity_type varchar(32)," +
" entity_id uuid," +
" transport varchar(32)," +
" db_storage varchar(32)," +
" re_exec varchar(32)," +
" js_exec varchar(32)," +
" email_exec varchar(32)," +
" sms_exec varchar(32)," +
" CONSTRAINT api_usage_state_unq_key UNIQUE (tenant_id, entity_id)\n" +
");");
} catch (Exception e) {
}
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.1.1", "schema_update_before.sql");
loadSql(schemaUpdateFile, conn);
log.info("Creating default tenant profiles...");
systemDataLoaderService.createDefaultTenantProfiles();
log.info("Updating tenant profiles...");
conn.createStatement().execute("call update_tenant_profiles()");
log.info("Creating default device profiles...");
PageLink pageLink = new PageLink(100);
PageData<Tenant> pageData;
do {
pageData = tenantService.findTenants(pageLink);
for (Tenant tenant : pageData.getData()) {
try {
apiUsageStateService.createDefaultApiUsageState(tenant.getId(), null);
} catch (Exception e) {
}
List<EntitySubtype> deviceTypes = deviceService.findDeviceTypesByTenantId(tenant.getId()).get();
try {
deviceProfileService.createDefaultDeviceProfile(tenant.getId());
} catch (Exception e) {
}
for (EntitySubtype deviceType : deviceTypes) {
try {
deviceProfileService.findOrCreateDeviceProfile(tenant.getId(), deviceType.getType());
} catch (Exception e) {
}
}
}
pageLink = pageLink.nextPageLink();
} while (pageData.hasNext());
log.info("Updating device profiles...");
conn.createStatement().execute("call update_device_profiles()");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.1.1", "schema_update_after.sql");
loadSql(schemaUpdateFile, conn);
conn.createStatement().execute("UPDATE tb_schema_settings SET schema_version = 3002000;");
}
log.info("Schema updated.");
} catch (Exception e) {
log.error("Failed updating schema!!!", e);
}
break;
case "3.2.0":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
try {
conn.createStatement().execute("CREATE INDEX IF NOT EXISTS idx_device_device_profile_id ON device(tenant_id, device_profile_id);");
conn.createStatement().execute("ALTER TABLE dashboard ALTER COLUMN configuration TYPE varchar;");
conn.createStatement().execute("UPDATE tb_schema_settings SET schema_version = 3002001;");
} catch (Exception e) {
log.error("Failed updating schema!!!", e);
}
log.info("Schema updated.");
}
break;
case "3.2.1":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
conn.createStatement().execute("CREATE INDEX IF NOT EXISTS idx_audit_log_tenant_id_and_created_time ON audit_log(tenant_id, created_time);");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.2.1", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
conn.createStatement().execute("UPDATE tb_schema_settings SET schema_version = 3002002;");
log.info("Schema updated.");
} catch (Exception e) {
log.error("Failed updating schema!!!", e);
}
break;
case "3.2.2":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
try {
conn.createStatement().execute("ALTER TABLE rule_chain ADD COLUMN type varchar(255) DEFAULT 'CORE'"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception ignored) {
}
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.2.2", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
log.info("Load Edge TTL functions ...");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.2.2", "schema_update_ttl.sql");
loadSql(schemaUpdateFile, conn);
log.info("Edge TTL functions successfully loaded!");
log.info("Updating indexes and TTL procedure for event table...");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.2.2", "schema_update_event.sql");
loadSql(schemaUpdateFile, conn);
log.info("Updating schema settings...");
conn.createStatement().execute("UPDATE tb_schema_settings SET schema_version = 3003000;");
log.info("Schema updated.");
} catch (Exception e) {
log.error("Failed updating schema!!!", e);
}
break;
case "3.3.2":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.3.2", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
try {
conn.createStatement().execute("ALTER TABLE alarm ADD COLUMN propagate_to_owner boolean DEFAULT false;"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
conn.createStatement().execute("ALTER TABLE alarm ADD COLUMN propagate_to_tenant boolean DEFAULT false;"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception ignored) {
}
try {
conn.createStatement().execute("insert into entity_alarm(tenant_id, entity_id, created_time, alarm_type, customer_id, alarm_id)" +
" select tenant_id, originator_id, created_time, type, customer_id, id from alarm ON CONFLICT DO NOTHING;");
conn.createStatement().execute("insert into entity_alarm(tenant_id, entity_id, created_time, alarm_type, customer_id, alarm_id)" +
" select a.tenant_id, r.from_id, created_time, type, customer_id, id" +
" from alarm a inner join relation r on r.relation_type_group = 'ALARM' and r.relation_type = 'ANY' and a.id = r.to_id ON CONFLICT DO NOTHING;");
conn.createStatement().execute("delete from relation r where r.relation_type_group = 'ALARM';");
} catch (Exception e) {
log.error("Failed to update alarm relations!!!", e);
}
log.info("Updating lwm2m device profiles ...");
try {
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.3.2", "schema_update_lwm2m_bootstrap.sql");
loadSql(schemaUpdateFile, conn);
log.info("Updating server`s public key from HexDec to Base64 in profile for LWM2M...");
conn.createStatement().execute("call update_profile_bootstrap();");
log.info("Server`s public key from HexDec to Base64 in profile for LWM2M updated.");
log.info("Updating client`s public key and secret key from HexDec to Base64 for LWM2M...");
conn.createStatement().execute("call update_device_credentials_to_base64_and_bootstrap();");
log.info("Client`s public key and secret key from HexDec to Base64 for LWM2M updated.");
} catch (Exception e) {
log.error("Failed to update lwm2m profiles!!!", e);
}
log.info("Updating schema settings...");
conn.createStatement().execute("UPDATE tb_schema_settings SET schema_version = 3003003;");
log.info("Schema updated.");
} catch (Exception e) {
log.error("Failed updating schema!!!", e);
}
break;
case "3.3.3":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
try {
conn.createStatement().execute("ALTER TABLE edge DROP COLUMN edge_license_key;"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
conn.createStatement().execute("ALTER TABLE edge DROP COLUMN cloud_endpoint;"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception ignored) {
}
log.info("Updating TTL cleanup procedure for the event table...");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.3.3", "schema_event_ttl_procedure.sql");
loadSql(schemaUpdateFile, conn);
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.3.3", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
log.info("Updating schema settings...");
conn.createStatement().execute("UPDATE tb_schema_settings SET schema_version = 3003004;");
log.info("Schema updated.");
} catch (Exception e) {
log.error("Failed updating schema!!!", e);
}
break;
case "3.3.4":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.3.4", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
log.info("Loading queues...");
try {
if (!CollectionUtils.isEmpty(queueConfig.getQueues())) {
queueConfig.getQueues().forEach(queueSettings -> {
Queue queue = queueConfigToQueue(queueSettings);
Queue existing = queueService.findQueueByTenantIdAndName(queue.getTenantId(), queue.getName());
if (existing == null) {
queueService.saveQueue(queue);
}
});
} else {
systemDataLoaderService.createQueues();
}
} catch (Exception e) {
}
log.info("Updating schema settings...");
conn.createStatement().execute("UPDATE tb_schema_settings SET schema_version = 3004000;");
log.info("Schema updated.");
} catch (Exception e) {
log.error("Failed updating schema!!!", e);
}
break;
case "3.4.0":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.4.0", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
log.info("Updating schema settings...");
conn.createStatement().execute("UPDATE tb_schema_settings SET schema_version = 3004001;");
log.info("Schema updated.");
} catch (Exception e) {
log.error("Failed updating schema!!!", e);
}
break;
case "3.4.1":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
runSchemaUpdateScript(conn, "3.4.1");
if (isOldSchema(conn, 3004001)) {
try {
conn.createStatement().execute("ALTER TABLE asset ADD COLUMN asset_profile_id uuid");
} catch (Exception e) {
}
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.4.1", "schema_update_before.sql");
loadSql(schemaUpdateFile, conn);
conn.createStatement().execute("DELETE FROM asset a WHERE NOT exists(SELECT id FROM tenant WHERE id = a.tenant_id);");
log.info("Creating default asset profiles...");
PageLink pageLink = new PageLink(1000);
PageData<TenantId> tenantIds;
do {
List<ListenableFuture<?>> futures = new ArrayList<>();
tenantIds = tenantService.findTenantsIds(pageLink);
for (TenantId tenantId : tenantIds.getData()) {
futures.add(dbUpgradeExecutor.submit(() -> {
try {
assetProfileService.createDefaultAssetProfile(tenantId);
} catch (Exception e) {
}
}));
}
Futures.allAsList(futures).get();
pageLink = pageLink.nextPageLink();
} while (tenantIds.hasNext());
pageLink = new PageLink(1000);
PageData<TbPair<UUID, String>> pairs;
do {
List<ListenableFuture<?>> futures = new ArrayList<>();
pairs = assetDao.getAllAssetTypes(pageLink);
for (TbPair<UUID, String> pair : pairs.getData()) {
TenantId tenantId = new TenantId(pair.getFirst());
String assetType = pair.getSecond();
if (!"default".equals(assetType)) {
futures.add(dbUpgradeExecutor.submit(() -> {
try {
assetProfileService.findOrCreateAssetProfile(tenantId, assetType);
} catch (Exception e) {
}
}));
}
}
Futures.allAsList(futures).get();
pageLink = pageLink.nextPageLink();
} while (pairs.hasNext());
log.info("Updating asset profiles...");
conn.createStatement().execute("call update_asset_profiles()");
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.4.1", "schema_update_after.sql");
loadSql(schemaUpdateFile, conn);
conn.createStatement().execute("UPDATE tb_schema_settings SET schema_version = 3004002;");
}
log.info("Schema updated.");
} catch (Exception e) {
log.error("Failed updating schema!!!", e);
}
break;
case "3.4.4":
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
log.info("Updating schema ...");
if (isOldSchema(conn, 3004002)) {
schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.4.4", SCHEMA_UPDATE_SQL);
loadSql(schemaUpdateFile, conn);
try {
conn.createStatement().execute("VACUUM FULL ANALYZE alarm;"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception e) {
}
try {
conn.createStatement().execute("ALTER TABLE asset_profile ADD COLUMN default_edge_rule_chain_id uuid"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception e) {
}
try {
conn.createStatement().execute("ALTER TABLE device_profile ADD COLUMN default_edge_rule_chain_id uuid"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception e) {
}
try {
conn.createStatement().execute("ALTER TABLE asset_profile ADD CONSTRAINT fk_default_edge_rule_chain_asset_profile FOREIGN KEY (default_edge_rule_chain_id) REFERENCES rule_chain(id)"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception e) {
}
try {
conn.createStatement().execute("ALTER TABLE device_profile ADD CONSTRAINT fk_default_edge_rule_chain_device_profile FOREIGN KEY (default_edge_rule_chain_id) REFERENCES rule_chain(id)"); //NOSONAR, ignoring because method used to execute thingsboard database upgrade script
} catch (Exception e) {
}
conn.createStatement().execute("UPDATE tb_schema_settings SET schema_version = 3005000;");
}
log.info("Schema updated.");
} catch (Exception e) {
log.error("Failed updating schema!!!", e);
}
break;
case "3.5.0":
updateSchema("3.5.0", 3005000, "3.5.1", 3005001, null);
break;
@ -726,7 +66,7 @@ public class SqlDatabaseUpgradeService implements DatabaseEntitiesUpgradeService
"asset_profile", "asset", "device_profile", "tb_user", "tenant_profile", "tenant", "widgets_bundle", "entity_view", "edge"};
for (String entityName : entityNames) {
try {
conn.createStatement().execute("ALTER TABLE " + entityName + " DROP COLUMN " + SEARCH_TEXT + " CASCADE");
conn.createStatement().execute("ALTER TABLE " + entityName + " DROP COLUMN search_text CASCADE");
} catch (Exception e) {
}
}
@ -799,11 +139,6 @@ public class SqlDatabaseUpgradeService implements DatabaseEntitiesUpgradeService
}
}
/**
 * Executes the standard schema update script for the given upgrade version.
 * The script is resolved as {@code <dataDir>/upgrade/<version>/schema_update.sql}.
 *
 * @param connection open connection to the ThingsBoard database
 * @param version    upgrade version directory name, e.g. "3.4.1"
 * @throws Exception if the script cannot be read or executed
 */
private void runSchemaUpdateScript(Connection connection, String version) throws Exception {
    loadSql(Paths.get(installScripts.getDataDir(), "upgrade", version, SCHEMA_UPDATE_SQL), connection);
}
private void loadSql(Path sqlFile, Connection conn) throws Exception {
String sql = new String(Files.readAllBytes(sqlFile), Charset.forName("UTF-8"));
Statement st = conn.createStatement();
@ -848,28 +183,4 @@ public class SqlDatabaseUpgradeService implements DatabaseEntitiesUpgradeService
}
return isOldSchema;
}
/**
 * Converts a YAML-defined rule-engine queue configuration into a {@link Queue}
 * entity owned by the system tenant, ready to be persisted during upgrade.
 *
 * @param queueSettings queue section parsed from thingsboard.yml
 * @return populated queue entity (never null)
 */
private Queue queueConfigToQueue(TbRuleEngineQueueConfiguration queueSettings) {
    // Submit strategy: how incoming messages are grouped into processing batches.
    SubmitStrategy submitStrategy = new SubmitStrategy();
    submitStrategy.setType(SubmitStrategyType.valueOf(queueSettings.getSubmitStrategy().getType()));
    submitStrategy.setBatchSize(queueSettings.getSubmitStrategy().getBatchSize());

    // Processing strategy: retry/backoff behaviour for failed message packs.
    ProcessingStrategy processingStrategy = new ProcessingStrategy();
    processingStrategy.setType(ProcessingStrategyType.valueOf(queueSettings.getProcessingStrategy().getType()));
    processingStrategy.setRetries(queueSettings.getProcessingStrategy().getRetries());
    processingStrategy.setFailurePercentage(queueSettings.getProcessingStrategy().getFailurePercentage());
    processingStrategy.setPauseBetweenRetries(queueSettings.getProcessingStrategy().getPauseBetweenRetries());
    processingStrategy.setMaxPauseBetweenRetries(queueSettings.getProcessingStrategy().getMaxPauseBetweenRetries());

    // Queues created during upgrade always belong to the system tenant.
    Queue queue = new Queue();
    queue.setTenantId(TenantId.SYS_TENANT_ID);
    queue.setName(queueSettings.getName());
    queue.setTopic(queueSettings.getTopic());
    queue.setPollInterval(queueSettings.getPollInterval());
    queue.setPartitions(queueSettings.getPartitions());
    queue.setPackProcessingTimeout(queueSettings.getPackProcessingTimeout());
    queue.setConsumerPerPartition(queueSettings.isConsumerPerPartition());
    queue.setSubmitStrategy(submitStrategy);
    queue.setProcessingStrategy(processingStrategy);
    return queue;
}
}

209
application/src/main/java/org/thingsboard/server/service/install/SqlTsDatabaseUpgradeService.java

@ -16,20 +16,13 @@
package org.thingsboard.server.service.install;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.SystemUtils;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Profile;
import org.springframework.stereotype.Service;
import org.thingsboard.server.common.data.StringUtils;
import org.thingsboard.server.dao.util.SqlTsDao;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.Connection;
import java.sql.DriverManager;
@Service
@Profile("install")
@ -37,216 +30,14 @@ import java.sql.DriverManager;
@SqlTsDao
public class SqlTsDatabaseUpgradeService extends AbstractSqlTsDatabaseUpgradeService implements DatabaseTsUpgradeService {
@Value("${sql.postgres.ts_key_value_partitioning:MONTHS}")
private String partitionType;
private static final String TS_KV_LATEST_SQL = "ts_kv_latest.sql";
private static final String LOAD_FUNCTIONS_SQL = "schema_update_psql_ts.sql";
private static final String LOAD_TTL_FUNCTIONS_SQL = "schema_update_ttl.sql";
private static final String LOAD_DROP_PARTITIONS_FUNCTIONS_SQL = "schema_update_psql_drop_partitions.sql";
private static final String TS_KV_OLD = "ts_kv_old;";
private static final String TS_KV_LATEST_OLD = "ts_kv_latest_old;";
private static final String CREATE_PARTITION_TS_KV_TABLE = "create_partition_ts_kv_table()";
private static final String CREATE_NEW_TS_KV_LATEST_TABLE = "create_new_ts_kv_latest_table()";
private static final String CREATE_PARTITIONS = "create_partitions(IN partition_type varchar)";
private static final String CREATE_TS_KV_DICTIONARY_TABLE = "create_ts_kv_dictionary_table()";
private static final String INSERT_INTO_DICTIONARY = "insert_into_dictionary()";
private static final String INSERT_INTO_TS_KV = "insert_into_ts_kv(IN path_to_file varchar)";
private static final String INSERT_INTO_TS_KV_LATEST = "insert_into_ts_kv_latest(IN path_to_file varchar)";
private static final String INSERT_INTO_TS_KV_CURSOR = "insert_into_ts_kv_cursor()";
private static final String INSERT_INTO_TS_KV_LATEST_CURSOR = "insert_into_ts_kv_latest_cursor()";
private static final String CALL_CREATE_PARTITION_TS_KV_TABLE = CALL_REGEX + CREATE_PARTITION_TS_KV_TABLE;
private static final String CALL_CREATE_NEW_TS_KV_LATEST_TABLE = CALL_REGEX + CREATE_NEW_TS_KV_LATEST_TABLE;
private static final String CALL_CREATE_TS_KV_DICTIONARY_TABLE = CALL_REGEX + CREATE_TS_KV_DICTIONARY_TABLE;
private static final String CALL_INSERT_INTO_DICTIONARY = CALL_REGEX + INSERT_INTO_DICTIONARY;
private static final String CALL_INSERT_INTO_TS_KV_CURSOR = CALL_REGEX + INSERT_INTO_TS_KV_CURSOR;
private static final String CALL_INSERT_INTO_TS_KV_LATEST_CURSOR = CALL_REGEX + INSERT_INTO_TS_KV_LATEST_CURSOR;
private static final String DROP_TABLE_TS_KV_OLD = DROP_TABLE + TS_KV_OLD;
private static final String DROP_TABLE_TS_KV_LATEST_OLD = DROP_TABLE + TS_KV_LATEST_OLD;
private static final String DROP_PROCEDURE_CREATE_PARTITION_TS_KV_TABLE = DROP_PROCEDURE_IF_EXISTS + CREATE_PARTITION_TS_KV_TABLE;
private static final String DROP_PROCEDURE_CREATE_NEW_TS_KV_LATEST_TABLE = DROP_PROCEDURE_IF_EXISTS + CREATE_NEW_TS_KV_LATEST_TABLE;
private static final String DROP_PROCEDURE_CREATE_PARTITIONS = DROP_PROCEDURE_IF_EXISTS + CREATE_PARTITIONS;
private static final String DROP_PROCEDURE_CREATE_TS_KV_DICTIONARY_TABLE = DROP_PROCEDURE_IF_EXISTS + CREATE_TS_KV_DICTIONARY_TABLE;
private static final String DROP_PROCEDURE_INSERT_INTO_DICTIONARY = DROP_PROCEDURE_IF_EXISTS + INSERT_INTO_DICTIONARY;
private static final String DROP_PROCEDURE_INSERT_INTO_TS_KV = DROP_PROCEDURE_IF_EXISTS + INSERT_INTO_TS_KV;
private static final String DROP_PROCEDURE_INSERT_INTO_TS_KV_LATEST = DROP_PROCEDURE_IF_EXISTS + INSERT_INTO_TS_KV_LATEST;
private static final String DROP_PROCEDURE_INSERT_INTO_TS_KV_CURSOR = DROP_PROCEDURE_IF_EXISTS + INSERT_INTO_TS_KV_CURSOR;
private static final String DROP_PROCEDURE_INSERT_INTO_TS_KV_LATEST_CURSOR = DROP_PROCEDURE_IF_EXISTS + INSERT_INTO_TS_KV_LATEST_CURSOR;
private static final String DROP_FUNCTION_GET_PARTITION_DATA = "DROP FUNCTION IF EXISTS get_partitions_data;";
/**
 * Upgrades the PostgreSQL time-series tables (ts_kv / ts_kv_latest) between
 * ThingsBoard versions. For 2.4.3 the data is migrated into the new partitioned
 * layout, preferably via COPY to/from temporary files, falling back to a
 * cursor-based INSERT strategy when file access is not possible.
 *
 * @param fromVersion version the installation is being upgraded from
 * @throws Exception on any unrecoverable upgrade failure
 */
@Override
public void upgradeDatabase(String fromVersion) throws Exception {
    switch (fromVersion) {
        case "2.4.3":
            try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
                log.info("Check the current PostgreSQL version...");
                boolean versionValid = checkVersion(conn);
                if (!versionValid) {
                    throw new RuntimeException("PostgreSQL version should be at least more than 11, please upgrade your PostgreSQL and restart the script!");
                } else {
                    log.info("PostgreSQL version is valid!");
                    if (isOldSchema(conn, 2004003)) {
                        log.info("Load upgrade functions ...");
                        loadSql(conn, LOAD_FUNCTIONS_SQL, "2.4.3");
                        log.info("Updating timeseries schema ...");
                        executeQuery(conn, CALL_CREATE_PARTITION_TS_KV_TABLE);
                        if (!partitionType.equals("INDEFINITE")) {
                            executeQuery(conn, "call create_partitions('" + partitionType + "')");
                        }
                        executeQuery(conn, CALL_CREATE_TS_KV_DICTIONARY_TABLE);
                        executeQuery(conn, CALL_INSERT_INTO_DICTIONARY);
                        Path pathToTempTsKvFile = null;
                        Path pathToTempTsKvLatestFile = null;
                        if (SystemUtils.IS_OS_WINDOWS) {
                            log.info("Lookup for environment variable: {} ...", THINGSBOARD_WINDOWS_UPGRADE_DIR);
                            Path pathToDir;
                            // Fix: use the shared constant instead of a duplicated string
                            // literal, consistent with the surrounding log statements and
                            // with TimescaleTsDatabaseUpgradeService.
                            String thingsboardWindowsUpgradeDir = System.getenv(THINGSBOARD_WINDOWS_UPGRADE_DIR);
                            if (StringUtils.isNotEmpty(thingsboardWindowsUpgradeDir)) {
                                log.info("Environment variable: {} was found!", THINGSBOARD_WINDOWS_UPGRADE_DIR);
                                pathToDir = Paths.get(thingsboardWindowsUpgradeDir);
                            } else {
                                log.info("Failed to lookup environment variable: {}", THINGSBOARD_WINDOWS_UPGRADE_DIR);
                                pathToDir = Paths.get(PATH_TO_USERS_PUBLIC_FOLDER);
                            }
                            log.info("Directory: {} will be used for creation temporary upgrade files!", pathToDir);
                            try {
                                Path tsKvFile = Files.createTempFile(pathToDir, "ts_kv", ".sql");
                                Path tsKvLatestFile = Files.createTempFile(pathToDir, "ts_kv_latest", ".sql");
                                pathToTempTsKvFile = tsKvFile.toAbsolutePath();
                                pathToTempTsKvLatestFile = tsKvLatestFile.toAbsolutePath();
                                try {
                                    copyTimeseries(conn, pathToTempTsKvFile, pathToTempTsKvLatestFile);
                                } catch (Exception e) {
                                    // COPY strategy failed: fall back to cursor-based inserts.
                                    insertTimeseries(conn);
                                }
                            } catch (IOException | SecurityException e) {
                                log.warn("Failed to create time-series upgrade files due to: {}", e.getMessage());
                                insertTimeseries(conn);
                            }
                        } else {
                            try {
                                Path tempDirPath = Files.createTempDirectory("ts_kv");
                                File tempDirAsFile = tempDirPath.toFile();
                                // Permissions must be world-accessible so the postgres
                                // server process can write the COPY output files.
                                boolean writable = tempDirAsFile.setWritable(true, false);
                                boolean readable = tempDirAsFile.setReadable(true, false);
                                boolean executable = tempDirAsFile.setExecutable(true, false);
                                pathToTempTsKvFile = tempDirPath.resolve(TS_KV_SQL).toAbsolutePath();
                                pathToTempTsKvLatestFile = tempDirPath.resolve(TS_KV_LATEST_SQL).toAbsolutePath();
                                try {
                                    if (writable && readable && executable) {
                                        copyTimeseries(conn, pathToTempTsKvFile, pathToTempTsKvLatestFile);
                                    } else {
                                        // Fix: added the missing space before "folder!".
                                        throw new RuntimeException("Failed to grant write permissions for the: " + tempDirPath + " folder!");
                                    }
                                } catch (Exception e) {
                                    insertTimeseries(conn);
                                }
                            } catch (IOException | SecurityException e) {
                                log.warn("Failed to create time-series upgrade files due to: {}", e.getMessage());
                                insertTimeseries(conn);
                            }
                        }
                        removeUpgradeFiles(pathToTempTsKvFile, pathToTempTsKvLatestFile);
                        // Drop the migrated tables and all temporary upgrade routines.
                        executeQuery(conn, DROP_TABLE_TS_KV_OLD);
                        executeQuery(conn, DROP_TABLE_TS_KV_LATEST_OLD);
                        executeQuery(conn, DROP_PROCEDURE_CREATE_PARTITION_TS_KV_TABLE);
                        executeQuery(conn, DROP_PROCEDURE_CREATE_PARTITIONS);
                        executeQuery(conn, DROP_PROCEDURE_CREATE_TS_KV_DICTIONARY_TABLE);
                        executeQuery(conn, DROP_PROCEDURE_INSERT_INTO_DICTIONARY);
                        executeQuery(conn, DROP_PROCEDURE_INSERT_INTO_TS_KV);
                        executeQuery(conn, DROP_PROCEDURE_CREATE_NEW_TS_KV_LATEST_TABLE);
                        executeQuery(conn, DROP_PROCEDURE_INSERT_INTO_TS_KV_LATEST);
                        executeQuery(conn, DROP_PROCEDURE_INSERT_INTO_TS_KV_CURSOR);
                        executeQuery(conn, DROP_PROCEDURE_INSERT_INTO_TS_KV_LATEST_CURSOR);
                        executeQuery(conn, DROP_FUNCTION_GET_PARTITION_DATA);
                        executeQuery(conn, "ALTER TABLE ts_kv ADD COLUMN IF NOT EXISTS json_v json;");
                        executeQuery(conn, "ALTER TABLE ts_kv_latest ADD COLUMN IF NOT EXISTS json_v json;");
                    } else {
                        // Schema already migrated: only rebuild the primary key.
                        executeQuery(conn, "ALTER TABLE ts_kv DROP CONSTRAINT IF EXISTS ts_kv_pkey;");
                        executeQuery(conn, "ALTER TABLE ts_kv ADD CONSTRAINT ts_kv_pkey PRIMARY KEY (entity_id, key, ts);");
                    }
                    log.info("Load TTL functions ...");
                    loadSql(conn, LOAD_TTL_FUNCTIONS_SQL, "2.4.3");
                    log.info("Load Drop Partitions functions ...");
                    loadSql(conn, LOAD_DROP_PARTITIONS_FUNCTIONS_SQL, "2.4.3");
                    executeQuery(conn, "UPDATE tb_schema_settings SET schema_version = 2005000");
                    log.info("schema timeseries updated!");
                }
            }
            break;
        case "2.5.0":
            try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
                executeQuery(conn, "CREATE TABLE IF NOT EXISTS ts_kv_indefinite PARTITION OF ts_kv DEFAULT;");
                executeQuery(conn, "UPDATE tb_schema_settings SET schema_version = 2005001");
            }
            break;
        case "3.1.1":
        case "3.2.1":
            try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
                log.info("Load TTL functions ...");
                loadSql(conn, LOAD_TTL_FUNCTIONS_SQL, "2.4.3");
                log.info("Load Drop Partitions functions ...");
                loadSql(conn, LOAD_DROP_PARTITIONS_FUNCTIONS_SQL, "2.4.3");
                // Remove legacy per-entity TTL cleanup routines replaced by the new functions.
                executeQuery(conn, "DROP PROCEDURE IF EXISTS cleanup_timeseries_by_ttl(character varying, bigint, bigint);");
                executeQuery(conn, "DROP FUNCTION IF EXISTS delete_asset_records_from_ts_kv(character varying, character varying, bigint);");
                executeQuery(conn, "DROP FUNCTION IF EXISTS delete_device_records_from_ts_kv(character varying, character varying, bigint);");
                executeQuery(conn, "DROP FUNCTION IF EXISTS delete_customer_records_from_ts_kv(character varying, character varying, bigint);");
            }
            break;
        case "3.2.2":
            try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
                log.info("Load Drop Partitions functions ...");
                loadSql(conn, LOAD_DROP_PARTITIONS_FUNCTIONS_SQL, "2.4.3");
            }
            break;
        default:
            throw new RuntimeException("Unable to upgrade SQL database, unsupported fromVersion: " + fromVersion);
    }
}
/**
 * Removes the temporary COPY files created during the ts_kv / ts_kv_latest
 * migration, logging a confirmation for each file actually removed.
 *
 * @param pathToTempTsKvFile       temp file for ts_kv, may be null if never created
 * @param pathToTempTsKvLatestFile temp file for ts_kv_latest, may be null if never created
 */
private void removeUpgradeFiles(Path pathToTempTsKvFile, Path pathToTempTsKvLatestFile) {
    deleteUpgradeTempFile(pathToTempTsKvFile, "Successfully deleted the temp file for ts_kv table upgrade!");
    deleteUpgradeTempFile(pathToTempTsKvLatestFile, "Successfully deleted the temp file for ts_kv_latest table upgrade!");
}

/** Deletes a single temp upgrade file if present, logging {@code successMessage} on success. */
private void deleteUpgradeTempFile(Path file, String successMessage) {
    if (file == null) {
        return;
    }
    File tempFile = file.toFile();
    if (tempFile.exists() && tempFile.delete()) {
        log.info(successMessage);
    }
}
/**
 * Migrates time-series data using the COPY to/from file strategy: dumps ts_kv
 * through the given temp file, creates the new ts_kv_latest table, then fills
 * it through the second temp file.
 *
 * @param conn                     open database connection
 * @param pathToTempTsKvFile       temp file used for the ts_kv transfer
 * @param pathToTempTsKvLatestFile temp file used for the ts_kv_latest transfer
 */
private void copyTimeseries(Connection conn, Path pathToTempTsKvFile, Path pathToTempTsKvLatestFile) {
    executeQuery(conn, String.format("call insert_into_ts_kv('%s')", pathToTempTsKvFile));
    executeQuery(conn, CALL_CREATE_NEW_TS_KV_LATEST_TABLE);
    executeQuery(conn, String.format("call insert_into_ts_kv_latest('%s')", pathToTempTsKvLatestFile));
}
/**
 * Fallback migration strategy: moves ts_kv and ts_kv_latest data with
 * cursor-based INSERT procedures when the COPY to/from file strategy fails
 * (e.g. no writable temp directory for the database server process).
 *
 * @param conn open database connection
 */
private void insertTimeseries(Connection conn) {
    // Fix: corrected the "perfrom" typo in the warning message.
    log.warn("Upgrade script failed using the copy to/from files strategy!" +
            " Trying to perform the upgrade using Inserts strategy ...");
    executeQuery(conn, CALL_INSERT_INTO_TS_KV_CURSOR);
    executeQuery(conn, CALL_CREATE_NEW_TS_KV_LATEST_TABLE);
    executeQuery(conn, CALL_INSERT_INTO_TS_KV_LATEST_CURSOR);
}
@Override
protected void loadSql(Connection conn, String fileName, String version) {
Path schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", version, fileName);

2
application/src/main/java/org/thingsboard/server/service/install/SystemDataLoaderService.java

@ -25,8 +25,6 @@ public interface SystemDataLoaderService {
void createRandomJwtSettings() throws Exception;
void saveLegacyYmlSettings() throws Exception;
void createOAuth2Templates() throws Exception;
void loadSystemWidgets() throws Exception;

49
application/src/main/java/org/thingsboard/server/service/install/TbRuleEngineQueueConfigService.java

@ -1,49 +0,0 @@
/**
* Copyright © 2016-2024 The Thingsboard Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.thingsboard.server.service.install;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Profile;
import org.thingsboard.server.common.data.DataConstants;
import org.thingsboard.server.queue.settings.TbRuleEngineQueueConfiguration;
import javax.annotation.PostConstruct;
import java.util.List;
/**
 * Binds the {@code queue.rule-engine} section of thingsboard.yml during the
 * install/upgrade profile and validates that the mandatory "Main" rule-engine
 * queue is configured.
 */
@Slf4j
@Data
@EnableAutoConfiguration
@Configuration
@ConfigurationProperties(prefix = "queue.rule-engine")
@Profile("install")
public class TbRuleEngineQueueConfigService {

    // Mapped from queue.rule-engine.topic.
    private String topic;
    // Mapped from queue.rule-engine.queues; may be absent in the YAML.
    private List<TbRuleEngineQueueConfiguration> queues;

    /**
     * Fails fast on startup when the "Main" queue is missing.
     *
     * @throws RuntimeException if no queues are configured or none is named "Main"
     */
    @PostConstruct
    public void validate() {
        // Fix: guard against a missing queue.rule-engine.queues section, which
        // previously surfaced as an opaque NullPointerException from queues.stream().
        if (queues == null || queues.isEmpty()) {
            log.error("Main queue is not configured in thingsboard.yml");
            throw new RuntimeException("No \"Main\" queue configured!");
        }
        queues.stream().filter(queue -> queue.getName().equals(DataConstants.MAIN_QUEUE_NAME)).findFirst().orElseThrow(() -> {
            log.error("Main queue is not configured in thingsboard.yml");
            return new RuntimeException("No \"Main\" queue configured!");
        });
    }
}

162
application/src/main/java/org/thingsboard/server/service/install/TimescaleTsDatabaseUpgradeService.java

@ -16,21 +16,14 @@
package org.thingsboard.server.service.install;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.SystemUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Profile;
import org.springframework.stereotype.Service;
import org.thingsboard.server.common.data.StringUtils;
import org.thingsboard.server.dao.util.TimescaleDBTsDao;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.sql.Connection;
import java.sql.DriverManager;
@Service
@Profile("install")
@ -38,172 +31,17 @@ import java.sql.DriverManager;
@TimescaleDBTsDao
public class TimescaleTsDatabaseUpgradeService extends AbstractSqlTsDatabaseUpgradeService implements DatabaseTsUpgradeService {
@Value("${sql.timescale.chunk_time_interval:86400000}")
private long chunkTimeInterval;
private static final String LOAD_FUNCTIONS_SQL = "schema_update_timescale_ts.sql";
private static final String LOAD_TTL_FUNCTIONS_SQL = "schema_update_ttl.sql";
private static final String TENANT_TS_KV_OLD_TABLE = "tenant_ts_kv_old;";
private static final String CREATE_TS_KV_LATEST_TABLE = "create_ts_kv_latest_table()";
private static final String CREATE_NEW_TS_KV_TABLE = "create_new_ts_kv_table()";
private static final String CREATE_TS_KV_DICTIONARY_TABLE = "create_ts_kv_dictionary_table()";
private static final String INSERT_INTO_DICTIONARY = "insert_into_dictionary()";
private static final String INSERT_INTO_TS_KV = "insert_into_ts_kv(IN path_to_file varchar)";
private static final String INSERT_INTO_TS_KV_CURSOR = "insert_into_ts_kv_cursor()";
private static final String INSERT_INTO_TS_KV_LATEST = "insert_into_ts_kv_latest()";
private static final String CALL_CREATE_TS_KV_LATEST_TABLE = CALL_REGEX + CREATE_TS_KV_LATEST_TABLE;
private static final String CALL_CREATE_NEW_TENANT_TS_KV_TABLE = CALL_REGEX + CREATE_NEW_TS_KV_TABLE;
private static final String CALL_CREATE_TS_KV_DICTIONARY_TABLE = CALL_REGEX + CREATE_TS_KV_DICTIONARY_TABLE;
private static final String CALL_INSERT_INTO_DICTIONARY = CALL_REGEX + INSERT_INTO_DICTIONARY;
private static final String CALL_INSERT_INTO_TS_KV_LATEST = CALL_REGEX + INSERT_INTO_TS_KV_LATEST;
private static final String CALL_INSERT_INTO_TS_KV_CURSOR = CALL_REGEX + INSERT_INTO_TS_KV_CURSOR;
private static final String DROP_OLD_TENANT_TS_KV_TABLE = DROP_TABLE + TENANT_TS_KV_OLD_TABLE;
private static final String DROP_PROCEDURE_CREATE_TS_KV_LATEST_TABLE = DROP_PROCEDURE_IF_EXISTS + CREATE_TS_KV_LATEST_TABLE;
private static final String DROP_PROCEDURE_CREATE_TENANT_TS_KV_TABLE_COPY = DROP_PROCEDURE_IF_EXISTS + CREATE_NEW_TS_KV_TABLE;
private static final String DROP_PROCEDURE_CREATE_TS_KV_DICTIONARY_TABLE = DROP_PROCEDURE_IF_EXISTS + CREATE_TS_KV_DICTIONARY_TABLE;
private static final String DROP_PROCEDURE_INSERT_INTO_DICTIONARY = DROP_PROCEDURE_IF_EXISTS + INSERT_INTO_DICTIONARY;
private static final String DROP_PROCEDURE_INSERT_INTO_TS_KV = DROP_PROCEDURE_IF_EXISTS + INSERT_INTO_TS_KV;
private static final String DROP_PROCEDURE_INSERT_INTO_TS_KV_CURSOR = DROP_PROCEDURE_IF_EXISTS + INSERT_INTO_TS_KV_CURSOR;
private static final String DROP_PROCEDURE_INSERT_INTO_TS_KV_LATEST = DROP_PROCEDURE_IF_EXISTS + INSERT_INTO_TS_KV_LATEST;
@Autowired
private InstallScripts installScripts;
/**
 * Upgrades the TimescaleDB time-series tables between ThingsBoard versions.
 * For 2.4.3 the data is migrated into the new hypertable layout, preferably
 * via COPY through a temporary file, falling back to a cursor-based INSERT
 * strategy when file access is not possible.
 *
 * @param fromVersion version the installation is being upgraded from
 * @throws Exception on any unrecoverable upgrade failure
 */
@Override
public void upgradeDatabase(String fromVersion) throws Exception {
    switch (fromVersion) {
        case "2.4.3":
            try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
                log.info("Check the current PostgreSQL version...");
                boolean versionValid = checkVersion(conn);
                if (!versionValid) {
                    throw new RuntimeException("PostgreSQL version should be at least more than 11, please upgrade your PostgreSQL and restart the script!");
                } else {
                    log.info("PostgreSQL version is valid!");
                    if (isOldSchema(conn, 2004003)) {
                        log.info("Load upgrade functions ...");
                        loadSql(conn, LOAD_FUNCTIONS_SQL, "2.4.3");
                        log.info("Updating timescale schema ...");
                        executeQuery(conn, CALL_CREATE_TS_KV_LATEST_TABLE);
                        executeQuery(conn, CALL_CREATE_NEW_TENANT_TS_KV_TABLE);
                        executeQuery(conn, "SELECT create_hypertable('ts_kv', 'ts', chunk_time_interval => " + chunkTimeInterval + ", if_not_exists => true);");
                        executeQuery(conn, CALL_CREATE_TS_KV_DICTIONARY_TABLE);
                        executeQuery(conn, CALL_INSERT_INTO_DICTIONARY);
                        Path pathToTempTsKvFile = null;
                        if (SystemUtils.IS_OS_WINDOWS) {
                            Path pathToDir;
                            log.info("Lookup for environment variable: {} ...", THINGSBOARD_WINDOWS_UPGRADE_DIR);
                            String thingsboardWindowsUpgradeDir = System.getenv(THINGSBOARD_WINDOWS_UPGRADE_DIR);
                            if (StringUtils.isNotEmpty(thingsboardWindowsUpgradeDir)) {
                                log.info("Environment variable: {} was found!", THINGSBOARD_WINDOWS_UPGRADE_DIR);
                                pathToDir = Paths.get(thingsboardWindowsUpgradeDir);
                            } else {
                                log.info("Failed to lookup environment variable: {}", THINGSBOARD_WINDOWS_UPGRADE_DIR);
                                pathToDir = Paths.get(PATH_TO_USERS_PUBLIC_FOLDER);
                            }
                            log.info("Directory: {} will be used for creation temporary upgrade file!", pathToDir);
                            try {
                                Path tsKvFile = Files.createTempFile(pathToDir, "ts_kv", ".sql");
                                pathToTempTsKvFile = tsKvFile.toAbsolutePath();
                                try {
                                    executeQuery(conn, "call insert_into_ts_kv('" + pathToTempTsKvFile + "')");
                                } catch (Exception e) {
                                    // COPY strategy failed: fall back to cursor-based inserts.
                                    insertTimeseries(conn);
                                }
                            } catch (IOException | SecurityException e) {
                                log.warn("Failed to create time-series upgrade files due to: {}", e.getMessage());
                                insertTimeseries(conn);
                            }
                        } else {
                            try {
                                Path tempDirPath = Files.createTempDirectory("ts_kv");
                                File tempDirAsFile = tempDirPath.toFile();
                                // Permissions must be world-accessible so the postgres
                                // server process can write the COPY output file.
                                boolean writable = tempDirAsFile.setWritable(true, false);
                                boolean readable = tempDirAsFile.setReadable(true, false);
                                boolean executable = tempDirAsFile.setExecutable(true, false);
                                pathToTempTsKvFile = tempDirPath.resolve(TS_KV_SQL).toAbsolutePath();
                                try {
                                    if (writable && readable && executable) {
                                        executeQuery(conn, "call insert_into_ts_kv('" + pathToTempTsKvFile + "')");
                                    } else {
                                        // Fix: added the missing space before "folder!".
                                        throw new RuntimeException("Failed to grant write permissions for the: " + tempDirPath + " folder!");
                                    }
                                } catch (Exception e) {
                                    insertTimeseries(conn);
                                }
                            } catch (IOException | SecurityException e) {
                                log.warn("Failed to create time-series upgrade files due to: {}", e.getMessage());
                                insertTimeseries(conn);
                            }
                        }
                        removeUpgradeFile(pathToTempTsKvFile);
                        executeQuery(conn, CALL_INSERT_INTO_TS_KV_LATEST);
                        // Drop the migrated table and all temporary upgrade routines.
                        executeQuery(conn, DROP_OLD_TENANT_TS_KV_TABLE);
                        executeQuery(conn, DROP_PROCEDURE_CREATE_TS_KV_LATEST_TABLE);
                        executeQuery(conn, DROP_PROCEDURE_CREATE_TENANT_TS_KV_TABLE_COPY);
                        executeQuery(conn, DROP_PROCEDURE_CREATE_TS_KV_DICTIONARY_TABLE);
                        executeQuery(conn, DROP_PROCEDURE_INSERT_INTO_DICTIONARY);
                        executeQuery(conn, DROP_PROCEDURE_INSERT_INTO_TS_KV);
                        executeQuery(conn, DROP_PROCEDURE_INSERT_INTO_TS_KV_CURSOR);
                        executeQuery(conn, DROP_PROCEDURE_INSERT_INTO_TS_KV_LATEST);
                        executeQuery(conn, "ALTER TABLE ts_kv ADD COLUMN IF NOT EXISTS json_v json;");
                        executeQuery(conn, "ALTER TABLE ts_kv_latest ADD COLUMN IF NOT EXISTS json_v json;");
                    }
                    log.info("Load TTL functions ...");
                    loadSql(conn, LOAD_TTL_FUNCTIONS_SQL, "2.4.3");
                    executeQuery(conn, "UPDATE tb_schema_settings SET schema_version = 2005000");
                    log.info("schema timescale updated!");
                }
            }
            break;
        case "2.5.0":
            try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
                executeQuery(conn, "UPDATE tb_schema_settings SET schema_version = 2005001");
            }
            break;
        case "3.1.1":
            // Nothing to do for timescale on this upgrade path.
            break;
        case "3.2.1":
            try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
                loadSql(conn, LOAD_TTL_FUNCTIONS_SQL, "3.2.1");
            }
            break;
        case "3.2.2":
            // Nothing to do for timescale on this upgrade path.
            break;
        default:
            throw new RuntimeException("Unable to upgrade SQL database, unsupported fromVersion: " + fromVersion);
    }
}
/**
 * Fallback migration strategy: moves tenant time-series data with the
 * cursor-based INSERT procedure when the COPY through file strategy fails
 * (e.g. no writable temp directory for the database server process).
 *
 * @param conn open database connection
 */
private void insertTimeseries(Connection conn) {
    // Fix: corrected the "perfrom" typo in the warning message.
    log.warn("Upgrade script failed using the copy to/from files strategy!" +
            " Trying to perform the upgrade using Inserts strategy ...");
    executeQuery(conn, CALL_INSERT_INTO_TS_KV_CURSOR);
}
/**
 * Removes the temporary COPY file created during the ts_kv migration,
 * logging a confirmation if the file was actually removed.
 *
 * @param pathToTempTsKvFile temp file for ts_kv, may be null if never created
 */
private void removeUpgradeFile(Path pathToTempTsKvFile) {
    // Nothing to clean up if the copy-through-file strategy never created the file.
    if (pathToTempTsKvFile == null) {
        return;
    }
    File tempFile = pathToTempTsKvFile.toFile();
    if (tempFile.exists() && tempFile.delete()) {
        log.info("Successfully deleted the temp file for ts_kv table upgrade!");
    }
}
@Override
protected void loadSql(Connection conn, String fileName, String version) {
Path schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", version, fileName);

218
application/src/main/java/org/thingsboard/server/service/install/cql/CassandraDbHelper.java

@ -1,218 +0,0 @@
/**
* Copyright © 2016-2024 The Thingsboard Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.thingsboard.server.service.install.cql;
import com.datastax.oss.driver.api.core.cql.BoundStatementBuilder;
import com.datastax.oss.driver.api.core.cql.PreparedStatement;
import com.datastax.oss.driver.api.core.cql.ResultSet;
import com.datastax.oss.driver.api.core.cql.Row;
import com.datastax.oss.driver.api.core.cql.SimpleStatement;
import com.datastax.oss.driver.api.core.cql.Statement;
import com.datastax.oss.driver.api.core.metadata.schema.KeyspaceMetadata;
import com.datastax.oss.driver.api.core.metadata.schema.TableMetadata;
import com.datastax.oss.driver.api.core.type.DataType;
import com.datastax.oss.protocol.internal.ProtocolConstants;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVPrinter;
import org.apache.commons.csv.CSVRecord;
import org.thingsboard.server.dao.cassandra.guava.GuavaSession;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.UUID;
import static org.thingsboard.server.service.install.DatabaseHelper.CSV_DUMP_FORMAT;
/**
 * Static helpers for migrating Cassandra column family ("CF") contents through
 * temporary CSV files: a CF can be dumped to a CSV file and a CSV dump can be
 * loaded back, optionally with an extra column appended to every record.
 * Used by the install/upgrade scripts.
 *
 * <p>All CSV I/O uses {@code DatabaseHelper.CSV_DUMP_FORMAT}.
 */
public class CassandraDbHelper {

    private CassandraDbHelper() {
        // Static utility class; not meant to be instantiated.
    }

    public static Path dumpCfIfExists(KeyspaceMetadata ks, GuavaSession session, String cfName,
                                      String[] columns, String[] defaultValues, String dumpPrefix) throws Exception {
        return dumpCfIfExists(ks, session, cfName, columns, defaultValues, dumpPrefix, false);
    }

    /**
     * Dumps the given column family to a freshly created temporary CSV file,
     * if the CF exists in the keyspace.
     *
     * @param ks            keyspace metadata used to check whether the CF exists
     * @param session       Cassandra session used to run the SELECT
     * @param cfName        column family (table) name
     * @param columns       columns to export, in CSV column order
     * @param defaultValues per-column fallback used when a column is absent from the result;
     *                      may be null or shorter than {@code columns} (missing entries become "")
     * @param dumpPrefix    temp-file name prefix
     * @param printHeader   whether to write a CSV header row with the column names
     * @return path of the created dump file, or null when the CF does not exist
     */
    public static Path dumpCfIfExists(KeyspaceMetadata ks, GuavaSession session, String cfName,
                                      String[] columns, String[] defaultValues, String dumpPrefix, boolean printHeader) throws Exception {
        // Driver 4.x getTable() returns Optional (never null); the previous
        // "!= null" check was always true, so a missing CF was not skipped.
        if (ks.getTable(cfName).isPresent()) {
            Path dumpFile = Files.createTempFile(dumpPrefix, null);
            Files.deleteIfExists(dumpFile);
            CSVFormat csvFormat = CSV_DUMP_FORMAT;
            if (printHeader) {
                csvFormat = csvFormat.withHeader(columns);
            }
            try (CSVPrinter csvPrinter = new CSVPrinter(Files.newBufferedWriter(dumpFile), csvFormat)) {
                // Driver 4.x statements are immutable: setPageSize() returns a
                // copy, so its result must be assigned back (previously the
                // configured page size was silently dropped).
                SimpleStatement stmt = SimpleStatement.newInstance("SELECT * FROM " + cfName);
                stmt = stmt.setPageSize(1000);
                ResultSet rs = session.execute(stmt);
                Iterator<Row> iter = rs.iterator();
                while (iter.hasNext()) {
                    Row row = iter.next();
                    if (row != null) {
                        dumpRow(row, columns, defaultValues, csvPrinter);
                    }
                }
            }
            return dumpFile;
        } else {
            return null;
        }
    }

    /**
     * Rewrites the CSV dump so that every record gets {@code toAppend} as an
     * additional last column. The rewrite goes through a temporary file which
     * then replaces the original dump.
     */
    public static void appendToEndOfLine(Path targetDumpFile, String toAppend) throws Exception {
        Path tmp = Files.createTempFile(null, null);
        try (CSVParser csvParser = new CSVParser(Files.newBufferedReader(targetDumpFile), CSV_DUMP_FORMAT)) {
            try (CSVPrinter csvPrinter = new CSVPrinter(Files.newBufferedWriter(tmp), CSV_DUMP_FORMAT)) {
                csvParser.forEach(record -> {
                    List<String> newRecord = new ArrayList<>();
                    record.forEach(newRecord::add);
                    newRecord.add(toAppend);
                    try {
                        csvPrinter.printRecord(newRecord);
                    } catch (IOException e) {
                        throw new RuntimeException("Error appending to EOL", e);
                    }
                });
            }
        }
        Files.move(tmp, targetDumpFile, StandardCopyOption.REPLACE_EXISTING);
    }

    public static void loadCf(KeyspaceMetadata ks, GuavaSession session, String cfName, String[] columns, Path sourceFile) throws Exception {
        loadCf(ks, session, cfName, columns, sourceFile, false);
    }

    /**
     * Loads a CSV dump into the given column family, inserting one row per CSV
     * record. Values are bound according to the column types reported by the
     * table metadata.
     *
     * @param parseHeader when true, the first CSV record is treated as a header;
     *                    otherwise {@code columns} defines the CSV column order
     */
    public static void loadCf(KeyspaceMetadata ks, GuavaSession session, String cfName, String[] columns, Path sourceFile, boolean parseHeader) throws Exception {
        TableMetadata tableMetadata = ks.getTable(cfName).get();
        PreparedStatement prepared = session.prepare(createInsertStatement(cfName, columns));
        CSVFormat csvFormat = CSV_DUMP_FORMAT;
        if (parseHeader) {
            csvFormat = csvFormat.withFirstRecordAsHeader();
        } else {
            csvFormat = CSV_DUMP_FORMAT.withHeader(columns);
        }
        try (CSVParser csvParser = new CSVParser(Files.newBufferedReader(sourceFile), csvFormat)) {
            csvParser.forEach(record -> {
                BoundStatementBuilder boundStatementBuilder = new BoundStatementBuilder(prepared.bind());
                for (String column : columns) {
                    setColumnValue(tableMetadata, column, record, boundStatementBuilder);
                }
                session.execute(boundStatementBuilder.build());
            });
        }
    }

    // Writes one CSV record for the row; columns missing from the row fall back
    // to the matching defaultValues entry (or "" when none is provided).
    private static void dumpRow(Row row, String[] columns, String[] defaultValues, CSVPrinter csvPrinter) throws Exception {
        List<String> record = new ArrayList<>();
        for (int i = 0; i < columns.length; i++) {
            String column = columns[i];
            String defaultValue;
            if (defaultValues != null && i < defaultValues.length) {
                defaultValue = defaultValues[i];
            } else {
                defaultValue = "";
            }
            record.add(getColumnValue(column, defaultValue, row));
        }
        csvPrinter.printRecord(record);
    }

    // Converts a single column of the row to its CSV string form based on the
    // protocol-level data type. Returns null for NULL cells, the defaultValue
    // when the column is not present, and "" when the value cannot be read.
    private static String getColumnValue(String column, String defaultValue, Row row) {
        int index = row.getColumnDefinitions().firstIndexOf(column);
        if (index > -1) {
            String str;
            DataType type = row.getColumnDefinitions().get(index).getType();
            try {
                if (row.isNull(index)) {
                    return null;
                } else if (type.getProtocolCode() == ProtocolConstants.DataType.DOUBLE) {
                    str = Double.valueOf(row.getDouble(index)).toString();
                } else if (type.getProtocolCode() == ProtocolConstants.DataType.INT) {
                    str = Integer.valueOf(row.getInt(index)).toString();
                } else if (type.getProtocolCode() == ProtocolConstants.DataType.BIGINT) {
                    str = Long.valueOf(row.getLong(index)).toString();
                } else if (type.getProtocolCode() == ProtocolConstants.DataType.UUID) {
                    str = row.getUuid(index).toString();
                } else if (type.getProtocolCode() == ProtocolConstants.DataType.TIMEUUID) {
                    str = row.getUuid(index).toString();
                } else if (type.getProtocolCode() == ProtocolConstants.DataType.FLOAT) {
                    str = Float.valueOf(row.getFloat(index)).toString();
                } else if (type.getProtocolCode() == ProtocolConstants.DataType.TIMESTAMP) {
                    // Timestamps are dumped as epoch millis so they round-trip
                    // through setColumnValue() below.
                    str = "" + row.getInstant(index).toEpochMilli();
                } else if (type.getProtocolCode() == ProtocolConstants.DataType.BOOLEAN) {
                    str = Boolean.valueOf(row.getBoolean(index)).toString();
                } else {
                    str = row.getString(index);
                }
            } catch (Exception e) {
                // Best-effort dump: unreadable values degrade to an empty string.
                str = "";
            }
            return str;
        } else {
            return defaultValue;
        }
    }

    // Builds "INSERT INTO cf (c1,c2,...) VALUES (?,?,...)" for the given columns.
    private static String createInsertStatement(String cfName, String[] columns) {
        StringBuilder insertStatementBuilder = new StringBuilder();
        insertStatementBuilder.append("INSERT INTO ").append(cfName).append(" (");
        for (String column : columns) {
            insertStatementBuilder.append(column).append(",");
        }
        insertStatementBuilder.deleteCharAt(insertStatementBuilder.length() - 1);
        insertStatementBuilder.append(") VALUES (");
        for (String column : columns) {
            insertStatementBuilder.append("?").append(",");
        }
        insertStatementBuilder.deleteCharAt(insertStatementBuilder.length() - 1);
        insertStatementBuilder.append(")");
        return insertStatementBuilder.toString();
    }

    // Binds one CSV value to the prepared statement, converting from the dumped
    // string form according to the column's protocol-level data type.
    private static void setColumnValue(TableMetadata tableMetadata, String column,
                                       CSVRecord record, BoundStatementBuilder boundStatementBuilder) {
        String value = record.get(column);
        DataType type = tableMetadata.getColumn(column).get().getType();
        if (value == null) {
            boundStatementBuilder.setToNull(column);
        } else if (type.getProtocolCode() == ProtocolConstants.DataType.DOUBLE) {
            boundStatementBuilder.setDouble(column, Double.valueOf(value));
        } else if (type.getProtocolCode() == ProtocolConstants.DataType.INT) {
            boundStatementBuilder.setInt(column, Integer.valueOf(value));
        } else if (type.getProtocolCode() == ProtocolConstants.DataType.BIGINT) {
            boundStatementBuilder.setLong(column, Long.valueOf(value));
        } else if (type.getProtocolCode() == ProtocolConstants.DataType.UUID) {
            boundStatementBuilder.setUuid(column, UUID.fromString(value));
        } else if (type.getProtocolCode() == ProtocolConstants.DataType.TIMEUUID) {
            boundStatementBuilder.setUuid(column, UUID.fromString(value));
        } else if (type.getProtocolCode() == ProtocolConstants.DataType.FLOAT) {
            boundStatementBuilder.setFloat(column, Float.valueOf(value));
        } else if (type.getProtocolCode() == ProtocolConstants.DataType.TIMESTAMP) {
            boundStatementBuilder.setInstant(column, Instant.ofEpochMilli(Long.valueOf(value)));
        } else if (type.getProtocolCode() == ProtocolConstants.DataType.BOOLEAN) {
            boundStatementBuilder.setBoolean(column, Boolean.valueOf(value));
        } else {
            boundStatementBuilder.setString(column, value);
        }
    }
}

327
application/src/main/java/org/thingsboard/server/service/install/migrate/CassandraEntitiesToSqlMigrateService.java

@ -1,327 +0,0 @@
/**
* Copyright © 2016-2024 The Thingsboard Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.thingsboard.server.service.install.migrate;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Profile;
import org.springframework.stereotype.Service;
import org.thingsboard.server.common.data.EntityType;
import org.thingsboard.server.common.data.UUIDConverter;
import org.thingsboard.server.dao.cassandra.CassandraCluster;
import org.thingsboard.server.dao.util.NoSqlAnyDao;
import org.thingsboard.server.service.install.EntityDatabaseSchemaService;
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Arrays;
import java.util.List;
import static org.thingsboard.server.service.install.migrate.CassandraToSqlColumn.bigintColumn;
import static org.thingsboard.server.service.install.migrate.CassandraToSqlColumn.booleanColumn;
import static org.thingsboard.server.service.install.migrate.CassandraToSqlColumn.doubleColumn;
import static org.thingsboard.server.service.install.migrate.CassandraToSqlColumn.enumToIntColumn;
import static org.thingsboard.server.service.install.migrate.CassandraToSqlColumn.idColumn;
import static org.thingsboard.server.service.install.migrate.CassandraToSqlColumn.jsonColumn;
import static org.thingsboard.server.service.install.migrate.CassandraToSqlColumn.stringColumn;
@Service
@Profile("install")
@NoSqlAnyDao
@Slf4j
public class CassandraEntitiesToSqlMigrateService implements EntitiesMigrateService {

    @Autowired
    private EntityDatabaseSchemaService entityDatabaseSchemaService;

    @Autowired
    protected CassandraCluster cluster;

    @Value("${spring.datasource.url}")
    protected String dbUrl;

    @Value("${spring.datasource.username}")
    protected String dbUserName;

    @Value("${spring.datasource.password}")
    protected String dbPassword;

    /**
     * Migrates all entity tables listed in {@link #TABLES} from Cassandra to the
     * SQL database: creates the SQL schema (without indexes), copies the data in
     * a single manually-committed transaction scope, then creates the indexes.
     *
     * @throws Exception if the migration of any table fails; the error is logged
     *                   and rethrown so the install process can abort
     */
    @Override
    public void migrate() throws Exception {
        log.info("Performing migration of entities data from cassandra to SQL database ...");
        entityDatabaseSchemaService.createDatabaseSchema(false);
        try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
            conn.setAutoCommit(false);
            for (CassandraToSqlTable table : TABLES) {
                table.migrateToSql(cluster.getSession(), conn);
            }
        } catch (Exception e) {
            log.error("Unexpected error during ThingsBoard entities data migration!", e);
            throw e;
        }
        // Indexes are created after the bulk copy so inserts are not slowed down.
        entityDatabaseSchemaService.createDatabaseIndexes();
    }

    // Mapping of every Cassandra CF to its SQL counterpart. Declared final
    // (constant naming) since the list is never reassigned; entries with
    // anonymous subclasses customize unique-constraint conflict handling.
    private static final List<CassandraToSqlTable> TABLES = Arrays.asList(
            new CassandraToSqlTable("admin_settings",
                    idColumn("id"),
                    stringColumn("key"),
                    stringColumn("json_value")),
            new CassandraToSqlTable("alarm",
                    idColumn("id"),
                    idColumn("tenant_id"),
                    stringColumn("type"),
                    idColumn("originator_id"),
                    enumToIntColumn("originator_type", EntityType.class),
                    stringColumn("severity"),
                    stringColumn("status"),
                    bigintColumn("start_ts"),
                    bigintColumn("end_ts"),
                    bigintColumn("ack_ts"),
                    bigintColumn("clear_ts"),
                    stringColumn("details", "additional_info"),
                    booleanColumn("propagate"),
                    stringColumn("propagate_relation_types")),
            new CassandraToSqlTable("asset",
                    idColumn("id"),
                    idColumn("tenant_id"),
                    idColumn("customer_id"),
                    stringColumn("name"),
                    stringColumn("type"),
                    stringColumn("label"),
                    stringColumn("search_text"),
                    stringColumn("additional_info")) {
                @Override
                protected boolean onConstraintViolation(List<CassandraToSqlColumnData[]> batchData,
                                                        CassandraToSqlColumnData[] data, String constraint) {
                    if (constraint.equalsIgnoreCase("asset_name_unq_key")) {
                        this.handleUniqueNameViolation(data, "asset");
                        return true;
                    }
                    return super.onConstraintViolation(batchData, data, constraint);
                }
            },
            new CassandraToSqlTable("audit_log_by_tenant_id", "audit_log",
                    idColumn("id"),
                    idColumn("tenant_id"),
                    idColumn("customer_id"),
                    idColumn("entity_id"),
                    stringColumn("entity_type"),
                    stringColumn("entity_name"),
                    idColumn("user_id"),
                    stringColumn("user_name"),
                    stringColumn("action_type"),
                    stringColumn("action_data"),
                    stringColumn("action_status"),
                    stringColumn("action_failure_details")),
            new CassandraToSqlTable("attributes_kv_cf", "attribute_kv",
                    idColumn("entity_id"),
                    stringColumn("entity_type"),
                    stringColumn("attribute_type"),
                    stringColumn("attribute_key"),
                    booleanColumn("bool_v", true),
                    stringColumn("str_v"),
                    bigintColumn("long_v"),
                    doubleColumn("dbl_v"),
                    jsonColumn("json_v"),
                    bigintColumn("last_update_ts")),
            new CassandraToSqlTable("component_descriptor",
                    idColumn("id"),
                    stringColumn("type"),
                    stringColumn("scope"),
                    stringColumn("name"),
                    stringColumn("search_text"),
                    stringColumn("clazz"),
                    stringColumn("configuration_descriptor"),
                    stringColumn("actions")) {
                @Override
                protected boolean onConstraintViolation(List<CassandraToSqlColumnData[]> batchData,
                                                        CassandraToSqlColumnData[] data, String constraint) {
                    if (constraint.equalsIgnoreCase("component_descriptor_clazz_key")) {
                        String clazz = this.getColumnData(data, "clazz").getValue();
                        log.warn("Found component_descriptor record with duplicate clazz [{}]. Record will be ignored!", clazz);
                        this.ignoreRecord(batchData, data);
                        return true;
                    }
                    return super.onConstraintViolation(batchData, data, constraint);
                }
            },
            new CassandraToSqlTable("customer",
                    idColumn("id"),
                    idColumn("tenant_id"),
                    stringColumn("title"),
                    stringColumn("search_text"),
                    stringColumn("country"),
                    stringColumn("state"),
                    stringColumn("city"),
                    stringColumn("address"),
                    stringColumn("address2"),
                    stringColumn("zip"),
                    stringColumn("phone"),
                    stringColumn("email"),
                    stringColumn("additional_info")),
            new CassandraToSqlTable("dashboard",
                    idColumn("id"),
                    idColumn("tenant_id"),
                    stringColumn("title"),
                    stringColumn("search_text"),
                    stringColumn("assigned_customers"),
                    stringColumn("configuration")),
            new CassandraToSqlTable("device",
                    idColumn("id"),
                    idColumn("tenant_id"),
                    idColumn("customer_id"),
                    stringColumn("name"),
                    stringColumn("type"),
                    stringColumn("label"),
                    stringColumn("search_text"),
                    stringColumn("additional_info")) {
                @Override
                protected boolean onConstraintViolation(List<CassandraToSqlColumnData[]> batchData,
                                                        CassandraToSqlColumnData[] data, String constraint) {
                    if (constraint.equalsIgnoreCase("device_name_unq_key")) {
                        this.handleUniqueNameViolation(data, "device");
                        return true;
                    }
                    return super.onConstraintViolation(batchData, data, constraint);
                }
            },
            new CassandraToSqlTable("device_credentials",
                    idColumn("id"),
                    idColumn("device_id"),
                    stringColumn("credentials_type"),
                    stringColumn("credentials_id"),
                    stringColumn("credentials_value")),
            new CassandraToSqlTable("event",
                    idColumn("id"),
                    idColumn("tenant_id"),
                    idColumn("entity_id"),
                    stringColumn("entity_type"),
                    stringColumn("event_type"),
                    stringColumn("event_uid"),
                    stringColumn("body"),
                    new CassandraToSqlEventTsColumn()),
            new CassandraToSqlTable("relation",
                    idColumn("from_id"),
                    stringColumn("from_type"),
                    idColumn("to_id"),
                    stringColumn("to_type"),
                    stringColumn("relation_type_group"),
                    stringColumn("relation_type"),
                    stringColumn("additional_info")),
            new CassandraToSqlTable("user", "tb_user",
                    idColumn("id"),
                    idColumn("tenant_id"),
                    idColumn("customer_id"),
                    stringColumn("email"),
                    stringColumn("search_text"),
                    stringColumn("authority"),
                    stringColumn("first_name"),
                    stringColumn("last_name"),
                    stringColumn("additional_info")) {
                @Override
                protected boolean onConstraintViolation(List<CassandraToSqlColumnData[]> batchData,
                                                        CassandraToSqlColumnData[] data, String constraint) {
                    if (constraint.equalsIgnoreCase("tb_user_email_key")) {
                        this.handleUniqueEmailViolation(data);
                        return true;
                    }
                    return super.onConstraintViolation(batchData, data, constraint);
                }
            },
            new CassandraToSqlTable("tenant",
                    idColumn("id"),
                    stringColumn("title"),
                    stringColumn("search_text"),
                    stringColumn("region"),
                    stringColumn("country"),
                    stringColumn("state"),
                    stringColumn("city"),
                    stringColumn("address"),
                    stringColumn("address2"),
                    stringColumn("zip"),
                    stringColumn("phone"),
                    stringColumn("email"),
                    stringColumn("additional_info"),
                    booleanColumn("isolated_tb_core"),
                    booleanColumn("isolated_tb_rule_engine")),
            new CassandraToSqlTable("user_credentials",
                    idColumn("id"),
                    idColumn("user_id"),
                    booleanColumn("enabled"),
                    stringColumn("password"),
                    stringColumn("activate_token"),
                    stringColumn("reset_token")) {
                @Override
                protected boolean onConstraintViolation(List<CassandraToSqlColumnData[]> batchData,
                                                        CassandraToSqlColumnData[] data, String constraint) {
                    if (constraint.equalsIgnoreCase("user_credentials_user_id_key")) {
                        String id = UUIDConverter.fromString(this.getColumnData(data, "id").getValue()).toString();
                        log.warn("Found user credentials record with duplicate user_id [id:[{}]]. Record will be ignored!", id);
                        this.ignoreRecord(batchData, data);
                        return true;
                    }
                    return super.onConstraintViolation(batchData, data, constraint);
                }
            },
            new CassandraToSqlTable("widget_type",
                    idColumn("id"),
                    idColumn("tenant_id"),
                    stringColumn("bundle_alias"),
                    stringColumn("alias"),
                    stringColumn("name"),
                    stringColumn("descriptor")),
            new CassandraToSqlTable("widgets_bundle",
                    idColumn("id"),
                    idColumn("tenant_id"),
                    stringColumn("alias"),
                    stringColumn("title"),
                    stringColumn("search_text")),
            new CassandraToSqlTable("rule_chain",
                    idColumn("id"),
                    idColumn("tenant_id"),
                    stringColumn("name"),
                    stringColumn("search_text"),
                    idColumn("first_rule_node_id"),
                    booleanColumn("root"),
                    booleanColumn("debug_mode"),
                    stringColumn("configuration"),
                    stringColumn("additional_info")),
            new CassandraToSqlTable("rule_node",
                    idColumn("id"),
                    idColumn("rule_chain_id"),
                    stringColumn("type"),
                    stringColumn("name"),
                    booleanColumn("debug_mode"),
                    stringColumn("search_text"),
                    stringColumn("configuration"),
                    stringColumn("additional_info")),
            new CassandraToSqlTable("entity_view",
                    idColumn("id"),
                    idColumn("tenant_id"),
                    idColumn("customer_id"),
                    idColumn("entity_id"),
                    stringColumn("entity_type"),
                    stringColumn("name"),
                    stringColumn("type"),
                    stringColumn("keys"),
                    bigintColumn("start_ts"),
                    bigintColumn("end_ts"),
                    stringColumn("search_text"),
                    stringColumn("additional_info"))
    );
}

4
application/src/main/java/org/thingsboard/server/service/install/migrate/CassandraTsLatestToSqlMigrateService.java

@ -64,6 +64,8 @@ public class CassandraTsLatestToSqlMigrateService implements TsLatestMigrateServ
private static final int MAX_KEY_LENGTH = 255;
private static final int MAX_STR_V_LENGTH = 10000000;
private static final String SQL_DIR = "sql";
@Autowired
private InsertLatestTsRepository insertLatestTsRepository;
@ -93,7 +95,7 @@ public class CassandraTsLatestToSqlMigrateService implements TsLatestMigrateServ
public void migrate() throws Exception {
log.info("Performing migration of latest timeseries data from cassandra to SQL database ...");
try (Connection conn = DriverManager.getConnection(dbUrl, dbUserName, dbPassword)) {
Path schemaUpdateFile = Paths.get(installScripts.getDataDir(), "upgrade", "3.0.1", "schema_ts_latest.sql");
Path schemaUpdateFile = Paths.get(installScripts.getDataDir(), SQL_DIR, "schema-ts-latest-psql.sql");
loadSql(schemaUpdateFile, conn);
conn.setAutoCommit(false);
for (CassandraToSqlTable table : tables) {

176
application/src/main/java/org/thingsboard/server/service/install/sql/SqlDbHelper.java

@ -1,176 +0,0 @@
/**
* Copyright © 2016-2024 The Thingsboard Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.thingsboard.server.service.install.sql;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVPrinter;
import org.apache.commons.csv.CSVRecord;
import java.nio.file.Files;
import java.nio.file.Path;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.thingsboard.server.service.install.DatabaseHelper.CSV_DUMP_FORMAT;
/**
 * Static helpers for dumping SQL tables to temporary CSV files and loading such
 * CSV dumps back into SQL tables. Used by the install/upgrade scripts to
 * preserve table contents across schema changes.
 *
 * <p>All CSV I/O uses {@code DatabaseHelper.CSV_DUMP_FORMAT}.
 *
 * Created by igor on 2/27/18.
 */
@Slf4j
public class SqlDbHelper {

    private SqlDbHelper() {
        // Static utility class; not meant to be instantiated.
    }

    public static Path dumpTableIfExists(Connection conn, String tableName,
                                         String[] columns, String[] defaultValues, String dumpPrefix) throws Exception {
        return dumpTableIfExists(conn, tableName, columns, defaultValues, dumpPrefix, false);
    }

    /**
     * Dumps the given table to a freshly created temporary CSV file, if the
     * table exists.
     *
     * @param conn          open SQL connection
     * @param tableName     table to export
     * @param columns       columns to export, in CSV column order
     * @param defaultValues per-column fallback used when a column is absent from the
     *                      result set; may be null or shorter than {@code columns}
     *                      (missing entries become "")
     * @param dumpPrefix    temp-file name prefix
     * @param printHeader   whether to write a CSV header row with the column names
     * @return path of the created dump file, or null when the table does not exist
     */
    public static Path dumpTableIfExists(Connection conn, String tableName,
                                         String[] columns, String[] defaultValues, String dumpPrefix, boolean printHeader) throws Exception {
        if (tableExists(conn, tableName)) {
            Path dumpFile = Files.createTempFile(dumpPrefix, null);
            Files.deleteIfExists(dumpFile);
            CSVFormat csvFormat = CSV_DUMP_FORMAT;
            if (printHeader) {
                csvFormat = csvFormat.withHeader(columns);
            }
            try (CSVPrinter csvPrinter = new CSVPrinter(Files.newBufferedWriter(dumpFile), csvFormat)) {
                try (PreparedStatement stmt = conn.prepareStatement("SELECT * FROM " + tableName)) {
                    try (ResultSet tableRes = stmt.executeQuery()) {
                        // Column names are matched case-insensitively: index them
                        // by upper-cased name once, up front.
                        ResultSetMetaData resMetaData = tableRes.getMetaData();
                        Map<String, Integer> columnIndexMap = new HashMap<>();
                        for (int i = 1; i <= resMetaData.getColumnCount(); i++) {
                            String columnName = resMetaData.getColumnName(i);
                            columnIndexMap.put(columnName.toUpperCase(), i);
                        }
                        while (tableRes.next()) {
                            dumpRow(tableRes, columnIndexMap, columns, defaultValues, csvPrinter);
                        }
                    }
                }
            }
            return dumpFile;
        } else {
            return null;
        }
    }

    // Existence probe: a failing zero-row SELECT means the table is absent
    // (deliberately broad catch -- any error is treated as "does not exist").
    private static boolean tableExists(Connection conn, String tableName) {
        try (Statement stmt = conn.createStatement()) {
            stmt.executeQuery("select * from " + tableName + " where 1=0");
            return true;
        } catch (Exception e) {
            return false;
        }
    }

    public static void loadTable(Connection conn, String tableName, String[] columns, Path sourceFile) throws Exception {
        loadTable(conn, tableName, columns, sourceFile, false);
    }

    /**
     * Loads a CSV dump into the given table, inserting one row per CSV record.
     * Records that fail to insert are logged and skipped (best-effort load).
     *
     * @param parseHeader when true, the first CSV record is treated as a header;
     *                    otherwise {@code columns} defines the CSV column order
     */
    public static void loadTable(Connection conn, String tableName, String[] columns, Path sourceFile, boolean parseHeader) throws Exception {
        CSVFormat csvFormat = CSV_DUMP_FORMAT;
        if (parseHeader) {
            csvFormat = csvFormat.withFirstRecordAsHeader();
        } else {
            csvFormat = CSV_DUMP_FORMAT.withHeader(columns);
        }
        try (PreparedStatement prepared = conn.prepareStatement(createInsertStatement(tableName, columns))) {
            try (CSVParser csvParser = new CSVParser(Files.newBufferedReader(sourceFile), csvFormat)) {
                csvParser.forEach(record -> {
                    try {
                        for (int i = 0; i < columns.length; i++) {
                            setColumnValue(i, columns[i], record, prepared);
                        }
                        prepared.execute();
                    } catch (SQLException e) {
                        // Intentional best-effort: skip the bad record, keep loading.
                        log.error("Unable to load table record!", e);
                    }
                });
            }
        }
    }

    // Writes one CSV record for the current result-set row; columns missing from
    // the result fall back to the matching defaultValues entry (or "").
    private static void dumpRow(ResultSet res, Map<String, Integer> columnIndexMap, String[] columns,
                                String[] defaultValues, CSVPrinter csvPrinter) throws Exception {
        List<String> record = new ArrayList<>();
        for (int i = 0; i < columns.length; i++) {
            String column = columns[i];
            String defaultValue;
            if (defaultValues != null && i < defaultValues.length) {
                defaultValue = defaultValues[i];
            } else {
                defaultValue = "";
            }
            record.add(getColumnValue(column, defaultValue, columnIndexMap, res));
        }
        csvPrinter.printRecord(record);
    }

    // Reads one column as a string. Returns null for SQL NULL, defaultValue when
    // the column is not in the result set, and "" when the value cannot be read.
    private static String getColumnValue(String column, String defaultValue, Map<String, Integer> columnIndexMap, ResultSet res) {
        int index = columnIndexMap.getOrDefault(column.toUpperCase(), -1);
        if (index > -1) {
            String str;
            try {
                Object obj = res.getObject(index);
                if (obj == null) {
                    return null;
                } else {
                    str = obj.toString();
                }
            } catch (Exception e) {
                // Best-effort dump: unreadable values degrade to an empty string.
                str = "";
            }
            return str;
        } else {
            return defaultValue;
        }
    }

    // Binds one CSV value, letting the JDBC driver convert the string to the
    // parameter type reported by ParameterMetaData.
    private static void setColumnValue(int index, String column,
                                       CSVRecord record, PreparedStatement preparedStatement) throws SQLException {
        String value = record.get(column);
        int type = preparedStatement.getParameterMetaData().getParameterType(index + 1);
        preparedStatement.setObject(index + 1, value, type);
    }

    // Builds "INSERT INTO table (c1,c2,...) VALUES (?,?,...)" for the given columns.
    private static String createInsertStatement(String tableName, String[] columns) {
        StringBuilder insertStatementBuilder = new StringBuilder();
        insertStatementBuilder.append("INSERT INTO ").append(tableName).append(" (");
        for (String column : columns) {
            insertStatementBuilder.append(column).append(",");
        }
        insertStatementBuilder.deleteCharAt(insertStatementBuilder.length() - 1);
        insertStatementBuilder.append(") VALUES (");
        for (String column : columns) {
            insertStatementBuilder.append("?").append(",");
        }
        insertStatementBuilder.deleteCharAt(insertStatementBuilder.length() - 1);
        insertStatementBuilder.append(")");
        return insertStatementBuilder.toString();
    }
}

41
application/src/main/java/org/thingsboard/server/service/install/update/DefaultCacheCleanupService.java

@ -48,47 +48,6 @@ public class DefaultCacheCleanupService implements CacheCleanupService {
@Override
public void clearCache(String fromVersion) throws Exception {
switch (fromVersion) {
case "3.0.1":
log.info("Clear cache to upgrade from version 3.0.1 to 3.1.0 ...");
clearAllCaches();
//do not break to show explicit calls for next versions
case "3.1.1":
log.info("Clear cache to upgrade from version 3.1.1 to 3.2.0 ...");
clearCacheByName("devices");
clearCacheByName("deviceProfiles");
clearCacheByName("tenantProfiles");
case "3.2.2":
log.info("Clear cache to upgrade from version 3.2.2 to 3.3.0 ...");
clearCacheByName("devices");
clearCacheByName("deviceProfiles");
clearCacheByName("tenantProfiles");
clearCacheByName("relations");
break;
case "3.3.2":
log.info("Clear cache to upgrade from version 3.3.2 to 3.3.3 ...");
clearAll();
break;
case "3.3.3":
log.info("Clear cache to upgrade from version 3.3.3 to 3.3.4 ...");
clearAll();
break;
case "3.3.4":
log.info("Clear cache to upgrade from version 3.3.4 to 3.4.0 ...");
clearAll();
break;
case "3.4.1":
log.info("Clear cache to upgrade from version 3.4.1 to 3.4.2 ...");
clearCacheByName("assets");
clearCacheByName("repositorySettings");
break;
case "3.4.2":
log.info("Clearing cache to upgrade from version 3.4.2 to 3.4.3 ...");
clearCacheByName("repositorySettings");
break;
case "3.4.4":
log.info("Clearing cache to upgrade from version 3.4.4 to 3.5.0");
clearAll();
break;
case "3.6.1":
log.info("Clearing cache to upgrade from version 3.6.1 to 3.6.2");
clearCacheByName(SECURITY_SETTINGS_CACHE);

572
application/src/main/java/org/thingsboard/server/service/install/update/DefaultDataUpdateService.java

@ -20,85 +20,29 @@ import com.fasterxml.jackson.databind.node.ObjectNode;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.MoreExecutors;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Lazy;
import org.springframework.context.annotation.Profile;
import org.springframework.stereotype.Service;
import org.thingsboard.common.util.JacksonUtil;
import org.thingsboard.rule.engine.flow.TbRuleChainInputNode;
import org.thingsboard.rule.engine.flow.TbRuleChainInputNodeConfiguration;
import org.thingsboard.rule.engine.profile.TbDeviceProfileNode;
import org.thingsboard.rule.engine.profile.TbDeviceProfileNodeConfiguration;
import org.thingsboard.server.common.data.AdminSettings;
import org.thingsboard.server.common.data.DataConstants;
import org.thingsboard.server.common.data.EntityView;
import org.thingsboard.server.common.data.Tenant;
import org.thingsboard.server.common.data.TenantProfile;
import org.thingsboard.server.common.data.alarm.Alarm;
import org.thingsboard.server.common.data.alarm.AlarmInfo;
import org.thingsboard.server.common.data.alarm.AlarmQuery;
import org.thingsboard.server.common.data.alarm.AlarmSeverity;
import org.thingsboard.server.common.data.id.EntityViewId;
import org.thingsboard.server.common.data.id.RuleChainId;
import org.thingsboard.server.common.data.id.RuleNodeId;
import org.thingsboard.server.common.data.id.TenantId;
import org.thingsboard.server.common.data.kv.BaseReadTsKvQuery;
import org.thingsboard.server.common.data.kv.ReadTsKvQuery;
import org.thingsboard.server.common.data.kv.TsKvEntry;
import org.thingsboard.server.common.data.msg.TbNodeConnectionType;
import org.thingsboard.server.common.data.page.PageData;
import org.thingsboard.server.common.data.page.PageDataIterable;
import org.thingsboard.server.common.data.page.PageLink;
import org.thingsboard.server.common.data.page.TimePageLink;
import org.thingsboard.server.common.data.query.DynamicValue;
import org.thingsboard.server.common.data.query.FilterPredicateValue;
import org.thingsboard.server.common.data.queue.ProcessingStrategy;
import org.thingsboard.server.common.data.queue.ProcessingStrategyType;
import org.thingsboard.server.common.data.queue.Queue;
import org.thingsboard.server.common.data.queue.SubmitStrategy;
import org.thingsboard.server.common.data.queue.SubmitStrategyType;
import org.thingsboard.server.common.data.relation.EntityRelation;
import org.thingsboard.server.common.data.relation.RelationTypeGroup;
import org.thingsboard.server.common.data.rule.RuleChain;
import org.thingsboard.server.common.data.rule.RuleChainMetaData;
import org.thingsboard.server.common.data.rule.RuleChainType;
import org.thingsboard.server.common.data.rule.RuleNode;
import org.thingsboard.server.common.data.tenant.profile.TenantProfileQueueConfiguration;
import org.thingsboard.server.dao.DaoUtil;
import org.thingsboard.server.dao.alarm.AlarmDao;
import org.thingsboard.server.dao.audit.AuditLogDao;
import org.thingsboard.server.dao.device.DeviceConnectivityConfiguration;
import org.thingsboard.server.dao.edge.EdgeEventDao;
import org.thingsboard.server.dao.entity.EntityService;
import org.thingsboard.server.dao.entityview.EntityViewService;
import org.thingsboard.server.dao.event.EventService;
import org.thingsboard.server.dao.model.sql.DeviceProfileEntity;
import org.thingsboard.server.dao.queue.QueueService;
import org.thingsboard.server.dao.relation.RelationService;
import org.thingsboard.server.dao.rule.RuleChainService;
import org.thingsboard.server.dao.settings.AdminSettingsService;
import org.thingsboard.server.dao.sql.JpaExecutorService;
import org.thingsboard.server.dao.sql.device.DeviceProfileRepository;
import org.thingsboard.server.dao.tenant.TenantProfileService;
import org.thingsboard.server.dao.tenant.TenantService;
import org.thingsboard.server.dao.timeseries.TimeseriesService;
import org.thingsboard.server.service.component.ComponentDiscoveryService;
import org.thingsboard.server.service.component.RuleNodeClassInfo;
import org.thingsboard.server.service.install.InstallScripts;
import org.thingsboard.server.service.install.SystemDataLoaderService;
import org.thingsboard.server.utils.TbNodeUpgradeUtils;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.stream.Collectors;
import static org.thingsboard.server.common.data.StringUtils.isBlank;
@Service
@Profile("install")
@ -108,58 +52,12 @@ public class DefaultDataUpdateService implements DataUpdateService {
private static final int MAX_PENDING_SAVE_RULE_NODE_FUTURES = 256;
private static final int DEFAULT_PAGE_SIZE = 1024;
@Autowired
private TenantService tenantService;
@Autowired
private RelationService relationService;
@Autowired
private RuleChainService ruleChainService;
@Autowired
private InstallScripts installScripts;
@Autowired
private EntityViewService entityViewService;
@Autowired
private TimeseriesService tsService;
@Autowired
private EntityService entityService;
@Autowired
private AlarmDao alarmDao;
@Autowired
private DeviceProfileRepository deviceProfileRepository;
@Autowired
private RateLimitsUpdater rateLimitsUpdater;
@Autowired
private TenantProfileService tenantProfileService;
@Lazy
@Autowired
private QueueService queueService;
@Autowired
private ComponentDiscoveryService componentDiscoveryService;
@Autowired
private SystemDataLoaderService systemDataLoaderService;
@Autowired
private EventService eventService;
@Autowired
private AuditLogDao auditLogDao;
@Autowired
private EdgeEventDao edgeEventDao;
@Autowired
JpaExecutorService jpaExecutorService;
@ -172,57 +70,6 @@ public class DefaultDataUpdateService implements DataUpdateService {
@Override
public void updateData(String fromVersion) throws Exception {
switch (fromVersion) {
case "1.4.0":
log.info("Updating data from version 1.4.0 to 2.0.0 ...");
tenantsDefaultRuleChainUpdater.updateEntities(null);
break;
case "3.0.1":
log.info("Updating data from version 3.0.1 to 3.1.0 ...");
tenantsEntityViewsUpdater.updateEntities(null);
break;
case "3.1.1":
log.info("Updating data from version 3.1.1 to 3.2.0 ...");
tenantsRootRuleChainUpdater.updateEntities(null);
break;
case "3.2.2":
log.info("Updating data from version 3.2.2 to 3.3.0 ...");
tenantsDefaultEdgeRuleChainUpdater.updateEntities(null);
tenantsAlarmsCustomerUpdater.updateEntities(null);
deviceProfileEntityDynamicConditionsUpdater.updateEntities(null);
updateOAuth2Params();
break;
case "3.3.2":
log.info("Updating data from version 3.3.2 to 3.3.3 ...");
updateNestedRuleChains();
break;
case "3.3.4":
log.info("Updating data from version 3.3.4 to 3.4.0 ...");
tenantsProfileQueueConfigurationUpdater.updateEntities();
rateLimitsUpdater.updateEntities();
break;
case "3.4.0":
boolean skipEventsMigration = getEnv("TB_SKIP_EVENTS_MIGRATION", false);
if (!skipEventsMigration) {
log.info("Updating data from version 3.4.0 to 3.4.1 ...");
eventService.migrateEvents();
}
break;
case "3.4.1":
log.info("Updating data from version 3.4.1 to 3.4.2 ...");
systemDataLoaderService.saveLegacyYmlSettings();
boolean skipAuditLogsMigration = getEnv("TB_SKIP_AUDIT_LOGS_MIGRATION", false);
if (!skipAuditLogsMigration) {
log.info("Starting audit logs migration. Can be skipped with TB_SKIP_AUDIT_LOGS_MIGRATION env variable set to true");
auditLogDao.migrateAuditLogs();
} else {
log.info("Skipping audit logs migration");
}
migrateEdgeEvents("Starting edge events migration. ");
break;
case "3.5.1":
log.info("Updating data from version 3.5.1 to 3.6.0 ...");
migrateEdgeEvents("Starting edge events migration - adding seq_id column. ");
break;
case "3.6.0":
log.info("Updating data from version 3.6.0 to 3.6.1 ...");
migrateDeviceConnectivity();
@ -232,16 +79,6 @@ public class DefaultDataUpdateService implements DataUpdateService {
}
}
/**
 * Migrates edge events unless explicitly skipped via environment variable.
 *
 * @param logPrefix context prefix prepended to the informational log line
 */
private void migrateEdgeEvents(String logPrefix) {
    boolean skipEdgeEventsMigration = getEnv("TB_SKIP_EDGE_EVENTS_MIGRATION", false);
    if (!skipEdgeEventsMigration) {
        // Fix: use parameterized logging instead of string concatenation (SLF4J idiom).
        log.info("{}Can be skipped with TB_SKIP_EDGE_EVENTS_MIGRATION env variable set to true", logPrefix);
        edgeEventDao.migrateEdgeEvents();
    } else {
        log.info("Skipping edge events migration");
    }
}
private void migrateDeviceConnectivity() {
if (adminSettingsService.findAdminSettingsByKey(TenantId.SYS_TENANT_ID, "connectivity") == null) {
AdminSettings connectivitySettings = new AdminSettings();
@ -320,27 +157,6 @@ public class DefaultDataUpdateService implements DataUpdateService {
return ruleNodeIds;
}
// Upgrades persisted device profiles so alarm-rule conditions created before 3.3.0
// use the dynamic-condition format; only profiles that actually changed are saved back.
private final PaginatedUpdater<String, DeviceProfileEntity> deviceProfileEntityDynamicConditionsUpdater =
        new PaginatedUpdater<>() {

            @Override
            protected String getName() {
                return "Device Profile Entity Dynamic Conditions Updater";
            }

            @Override
            protected PageData<DeviceProfileEntity> findEntities(String id, PageLink pageLink) {
                // Page straight out of the JPA repository and adapt to the DAO page type.
                var page = deviceProfileRepository.findAll(DaoUtil.toPageable(pageLink));
                return DaoUtil.pageToPageData(page);
            }

            @Override
            protected void updateEntity(DeviceProfileEntity deviceProfile) {
                boolean changed = convertDeviceProfileForVersion330(deviceProfile.getProfileData());
                if (!changed) {
                    return; // nothing to persist
                }
                deviceProfileRepository.save(deviceProfile);
            }
        };
boolean convertDeviceProfileForVersion330(JsonNode profileData) {
boolean isUpdated = false;
if (profileData.has("alarms") && !profileData.get("alarms").isNull()) {
@ -368,327 +184,6 @@ public class DefaultDataUpdateService implements DataUpdateService {
return isUpdated;
}
// Creates the default set of rule chains for any tenant that has no root rule chain yet
// (part of the 1.4.0 -> 2.0.0 upgrade).
private final PaginatedUpdater<String, Tenant> tenantsDefaultRuleChainUpdater =
        new PaginatedUpdater<>() {

            @Override
            protected boolean forceReportTotal() {
                return true;
            }

            @Override
            protected String getName() {
                return "Tenants default rule chain updater";
            }

            @Override
            protected PageData<Tenant> findEntities(String region, PageLink pageLink) {
                return tenantService.findTenants(pageLink);
            }

            @Override
            protected void updateEntity(Tenant tenant) {
                try {
                    // A missing root rule chain means this tenant was never provisioned with defaults.
                    if (ruleChainService.getRootTenantRuleChain(tenant.getId()) == null) {
                        installScripts.createDefaultRuleChains(tenant.getId());
                    }
                } catch (Exception e) {
                    log.error("Unable to update Tenant", e);
                }
            }
        };
/*
 * 3.3.2 -> 3.3.3 upgrade: replaces legacy rule-node -> rule-chain relations with
 * explicit "rule chain input" nodes. For every old relation, a TbRuleChainInputNode
 * pointing at the target chain is created inside the source chain, wired to the
 * source node with the original relation type, and the legacy relation is deleted.
 */
private void updateNestedRuleChains() {
    try {
        var updated = 0;
        boolean hasNext = true;
        while (hasNext) {
            // Deleting processed relations shrinks the result set, so re-querying the
            // first DEFAULT_PAGE_SIZE relations on each pass effectively paginates.
            List<EntityRelation> relations = relationService.findRuleNodeToRuleChainRelations(TenantId.SYS_TENANT_ID, RuleChainType.CORE, DEFAULT_PAGE_SIZE);
            hasNext = relations.size() == DEFAULT_PAGE_SIZE;
            for (EntityRelation relation : relations) {
                try {
                    RuleNodeId sourceNodeId = new RuleNodeId(relation.getFrom().getId());
                    RuleNode sourceNode = ruleChainService.findRuleNodeById(TenantId.SYS_TENANT_ID, sourceNodeId);
                    if (sourceNode == null) {
                        // Dangling relation: the source rule node no longer exists.
                        log.info("Skip processing of relation for non existing source rule node: [{}]", sourceNodeId);
                        relationService.deleteRelation(TenantId.SYS_TENANT_ID, relation);
                        continue;
                    }
                    RuleChainId sourceRuleChainId = sourceNode.getRuleChainId();
                    RuleChainId targetRuleChainId = new RuleChainId(relation.getTo().getId());
                    RuleChain targetRuleChain = ruleChainService.findRuleChainById(TenantId.SYS_TENANT_ID, targetRuleChainId);
                    if (targetRuleChain == null) {
                        // Dangling relation: the target rule chain no longer exists.
                        log.info("Skip processing of relation for non existing target rule chain: [{}]", targetRuleChainId);
                        relationService.deleteRelation(TenantId.SYS_TENANT_ID, relation);
                        continue;
                    }
                    TenantId tenantId = targetRuleChain.getTenantId();
                    // Create an "input" node inside the source chain that forwards messages
                    // to the target chain, carrying over the relation's additional info.
                    RuleNode targetNode = new RuleNode();
                    targetNode.setName(targetRuleChain.getName());
                    targetNode.setRuleChainId(sourceRuleChainId);
                    targetNode.setType(TbRuleChainInputNode.class.getName());
                    TbRuleChainInputNodeConfiguration configuration = new TbRuleChainInputNodeConfiguration();
                    configuration.setRuleChainId(targetRuleChain.getId().toString());
                    targetNode.setConfiguration(JacksonUtil.valueToTree(configuration));
                    targetNode.setAdditionalInfo(relation.getAdditionalInfo());
                    targetNode.setDebugMode(false);
                    targetNode = ruleChainService.saveRuleNode(tenantId, targetNode);

                    // The source chain now CONTAINS the new input node.
                    EntityRelation sourceRuleChainToRuleNode = new EntityRelation();
                    sourceRuleChainToRuleNode.setFrom(sourceRuleChainId);
                    sourceRuleChainToRuleNode.setTo(targetNode.getId());
                    sourceRuleChainToRuleNode.setType(EntityRelation.CONTAINS_TYPE);
                    sourceRuleChainToRuleNode.setTypeGroup(RelationTypeGroup.RULE_CHAIN);
                    relationService.saveRelation(tenantId, sourceRuleChainToRuleNode);

                    // Re-wire the source node to the new input node, preserving the original link type.
                    EntityRelation sourceRuleNodeToTargetRuleNode = new EntityRelation();
                    sourceRuleNodeToTargetRuleNode.setFrom(sourceNode.getId());
                    sourceRuleNodeToTargetRuleNode.setTo(targetNode.getId());
                    sourceRuleNodeToTargetRuleNode.setType(relation.getType());
                    sourceRuleNodeToTargetRuleNode.setTypeGroup(RelationTypeGroup.RULE_NODE);
                    sourceRuleNodeToTargetRuleNode.setAdditionalInfo(relation.getAdditionalInfo());
                    relationService.saveRelation(tenantId, sourceRuleNodeToTargetRuleNode);

                    //Delete old relation
                    relationService.deleteRelation(tenantId, relation);
                    updated++;
                } catch (Exception e) {
                    // NOTE(review): a relation that repeatedly fails here is never deleted and
                    // will be fetched again on the next pass - confirm this cannot loop forever.
                    log.info("Failed to update RuleNodeToRuleChainRelation: {}", relation, e);
                }
            }
            if (updated > 0) {
                log.info("RuleNodeToRuleChainRelations: {} entities updated so far...", updated);
            }
        }
    } catch (Exception e) {
        log.error("Unable to update Tenant", e);
    }
}
// Provisions the default edge rule chains for tenants that lack an edge template
// root rule chain (part of the 3.2.2 -> 3.3.0 upgrade).
private final PaginatedUpdater<String, Tenant> tenantsDefaultEdgeRuleChainUpdater =
        new PaginatedUpdater<>() {

            @Override
            protected boolean forceReportTotal() {
                return true;
            }

            @Override
            protected String getName() {
                return "Tenants default edge rule chain updater";
            }

            @Override
            protected PageData<Tenant> findEntities(String region, PageLink pageLink) {
                return tenantService.findTenants(pageLink);
            }

            @Override
            protected void updateEntity(Tenant tenant) {
                try {
                    RuleChain rootEdgeChain = ruleChainService.getEdgeTemplateRootRuleChain(tenant.getId());
                    if (rootEdgeChain == null) {
                        installScripts.createDefaultEdgeRuleChains(tenant.getId());
                    }
                } catch (Exception e) {
                    log.error("Unable to update Tenant", e);
                }
            }
        };
// Injects a "Device Profile Node" as the new first node of every tenant's root rule chain
// (3.1.1 -> 3.2.0 upgrade). Tenants without a root chain get the default chains instead.
private final PaginatedUpdater<String, Tenant> tenantsRootRuleChainUpdater =
        new PaginatedUpdater<>() {

            @Override
            protected String getName() {
                return "Tenants root rule chain updater";
            }

            @Override
            protected boolean forceReportTotal() {
                return true;
            }

            @Override
            protected PageData<Tenant> findEntities(String region, PageLink pageLink) {
                return tenantService.findTenants(pageLink);
            }

            @Override
            protected void updateEntity(Tenant tenant) {
                try {
                    RuleChain ruleChain = ruleChainService.getRootTenantRuleChain(tenant.getId());
                    if (ruleChain == null) {
                        installScripts.createDefaultRuleChains(tenant.getId());
                    } else {
                        RuleChainMetaData md = ruleChainService.loadRuleChainMetaData(tenant.getId(), ruleChain.getId());
                        int oldIdx = md.getFirstNodeIndex();
                        int newIdx = md.getNodes().size();
                        // Fix: use <= so that oldIdx == size (an out-of-range first-node index)
                        // is also treated as invalid; the previous '<' would let
                        // md.getNodes().get(oldIdx) below throw IndexOutOfBoundsException.
                        if (md.getNodes().size() <= oldIdx) {
                            // Skip invalid rule chains
                            return;
                        }
                        RuleNode oldFirstNode = md.getNodes().get(oldIdx);
                        if (oldFirstNode.getType().equals(TbDeviceProfileNode.class.getName())) {
                            // No need to update the rule node twice.
                            return;
                        }
                        // Build the new first node with the default device-profile configuration.
                        RuleNode ruleNode = new RuleNode();
                        ruleNode.setRuleChainId(ruleChain.getId());
                        ruleNode.setName("Device Profile Node");
                        ruleNode.setType(TbDeviceProfileNode.class.getName());
                        ruleNode.setDebugMode(false);
                        TbDeviceProfileNodeConfiguration ruleNodeConfiguration = new TbDeviceProfileNodeConfiguration().defaultConfiguration();
                        ruleNode.setConfiguration(JacksonUtil.valueToTree(ruleNodeConfiguration));
                        ObjectNode additionalInfo = JacksonUtil.newObjectNode();
                        additionalInfo.put("description", "Process incoming messages from devices with the alarm rules defined in the device profile. Dispatch all incoming messages with \"Success\" relation type.");
                        additionalInfo.put("layoutX", 204);
                        additionalInfo.put("layoutY", 240);
                        ruleNode.setAdditionalInfo(additionalInfo);
                        // Append the node, make it the entry point, and route it into the old first node.
                        md.getNodes().add(ruleNode);
                        md.setFirstNodeIndex(newIdx);
                        md.addConnectionInfo(newIdx, oldIdx, TbNodeConnectionType.SUCCESS);
                        ruleChainService.saveRuleChainMetaData(tenant.getId(), md, Function.identity());
                    }
                } catch (Exception e) {
                    log.error("[{}] Unable to update Tenant: {}", tenant.getId(), tenant.getName(), e);
                }
            }
        };
// Copies the latest telemetry of each tenant's entity views (3.0.1 -> 3.1.0 upgrade).
private final PaginatedUpdater<String, Tenant> tenantsEntityViewsUpdater =
        new PaginatedUpdater<>() {

            @Override
            protected boolean forceReportTotal() {
                return true;
            }

            @Override
            protected String getName() {
                return "Tenants entity views updater";
            }

            @Override
            protected PageData<Tenant> findEntities(String region, PageLink pageLink) {
                return tenantService.findTenants(pageLink);
            }

            @Override
            protected void updateEntity(Tenant tenant) {
                // Per-tenant work is delegated to keep this updater a thin pagination shell.
                updateTenantEntityViews(tenant.getId());
            }
        };
/**
 * Copies the latest telemetry for every entity view of the given tenant, in pages of 100.
 * Failures for a page are logged and do not abort the overall migration.
 *
 * @param tenantId tenant whose entity views are processed
 */
private void updateTenantEntityViews(TenantId tenantId) {
    PageLink pageLink = new PageLink(100);
    PageData<EntityView> pageData = entityViewService.findEntityViewByTenantId(tenantId, pageLink);
    boolean hasNext = true;
    while (hasNext) {
        List<ListenableFuture<List<Void>>> updateFutures = new ArrayList<>();
        for (EntityView entityView : pageData.getData()) {
            updateFutures.add(updateEntityViewLatestTelemetry(entityView));
        }
        try {
            // Block until the whole page is copied before fetching the next page.
            Futures.allAsList(updateFutures).get();
        } catch (InterruptedException e) {
            // Fix: restore the interrupt status instead of swallowing it,
            // so callers can still observe the interruption.
            Thread.currentThread().interrupt();
            log.error("Failed to copy latest telemetry to entity view", e);
        } catch (ExecutionException e) {
            log.error("Failed to copy latest telemetry to entity view", e);
        }
        if (pageData.hasNext()) {
            pageLink = pageLink.nextPageLink();
            pageData = entityViewService.findEntityViewByTenantId(tenantId, pageLink);
        } else {
            hasNext = false;
        }
    }
}
/**
 * Copies the most recent telemetry values of the entity view's underlying entity into the
 * entity view itself, honoring the view's configured key filter and time window.
 * Returns a future that completes when the values have been saved (or immediately with
 * null when there is nothing to copy).
 */
private ListenableFuture<List<Void>> updateEntityViewLatestTelemetry(EntityView entityView) {
    EntityViewId entityId = entityView.getId();
    // Keys explicitly configured on the view; an empty list means "all keys".
    List<String> keys = entityView.getKeys() != null && entityView.getKeys().getTimeseries() != null ?
            entityView.getKeys().getTimeseries() : Collections.emptyList();
    long startTs = entityView.getStartTimeMs();
    // endTimeMs == 0 is treated as "no upper bound".
    long endTs = entityView.getEndTimeMs() == 0 ? Long.MAX_VALUE : entityView.getEndTimeMs();
    ListenableFuture<List<String>> keysFuture;
    if (keys.isEmpty()) {
        // No explicit keys: discover them from the source entity's latest telemetry.
        keysFuture = Futures.transform(tsService.findAllLatest(TenantId.SYS_TENANT_ID,
                entityView.getEntityId()), latest -> latest.stream().map(TsKvEntry::getKey).collect(Collectors.toList()), MoreExecutors.directExecutor());
    } else {
        keysFuture = Futures.immediateFuture(keys);
    }
    // For each (non-blank) key, fetch the single newest value within the view's time window.
    ListenableFuture<List<TsKvEntry>> latestFuture = Futures.transformAsync(keysFuture, fetchKeys -> {
        List<ReadTsKvQuery> queries = fetchKeys.stream().filter(key -> !isBlank(key)).map(key -> new BaseReadTsKvQuery(key, startTs, endTs, 1, "DESC")).collect(Collectors.toList());
        if (!queries.isEmpty()) {
            return tsService.findAll(TenantId.SYS_TENANT_ID, entityView.getEntityId(), queries);
        } else {
            return Futures.immediateFuture(null);
        }
    }, MoreExecutors.directExecutor());
    // Persist the fetched values as the entity view's own "latest" telemetry.
    return Futures.transformAsync(latestFuture, latestValues -> {
        if (latestValues != null && !latestValues.isEmpty()) {
            ListenableFuture<List<Void>> saveFuture = tsService.saveLatest(TenantId.SYS_TENANT_ID, entityId, latestValues);
            return saveFuture;
        }
        return Futures.immediateFuture(null);
    }, MoreExecutors.directExecutor());
}
// Backfills the customer id on tenant alarms (part of the 3.2.2 -> 3.3.0 upgrade).
private final PaginatedUpdater<String, Tenant> tenantsAlarmsCustomerUpdater =
        new PaginatedUpdater<>() {

            // Running total of alarms processed across all tenants, used for progress logging.
            final AtomicLong processed = new AtomicLong();

            @Override
            protected boolean forceReportTotal() {
                return true;
            }

            @Override
            protected String getName() {
                return "Tenants alarms customer updater";
            }

            @Override
            protected PageData<Tenant> findEntities(String region, PageLink pageLink) {
                return tenantService.findTenants(pageLink);
            }

            @Override
            protected void updateEntity(Tenant tenant) {
                updateTenantAlarmsCustomer(tenant.getId(), getName(), processed);
            }
        };
/**
 * Backfills the customer id on all alarms of the given tenant, in pages of 1000.
 *
 * @param tenantId  tenant whose alarms are updated
 * @param name      updater name, used only for progress logging
 * @param processed shared counter of alarms processed so far (across tenants)
 */
private void updateTenantAlarmsCustomer(TenantId tenantId, String name, AtomicLong processed) {
    AlarmQuery alarmQuery = new AlarmQuery(null, new TimePageLink(1000), null, null, null, false);
    PageData<AlarmInfo> alarms = alarmDao.findAlarms(tenantId, alarmQuery);
    boolean hasNext = true;
    while (hasNext) {
        for (Alarm alarm : alarms.getData()) {
            // Only alarms that never had a customer assigned and have a known originator.
            if (alarm.getCustomerId() == null && alarm.getOriginator() != null) {
                // NOTE(review): .get() is called here without an emptiness/readiness check -
                // confirm fetchEntityCustomerId always yields a value for a valid originator.
                alarm.setCustomerId(entityService.fetchEntityCustomerId(tenantId, alarm.getOriginator()).get());
                alarmDao.save(tenantId, alarm);
            }
            // Progress log every 1000 alarms, whether or not they needed an update.
            if (processed.incrementAndGet() % 1000 == 0) {
                log.info("{}: {} alarms processed so far...", name, processed);
            }
        }
        if (alarms.hasNext()) {
            alarmQuery.setPageLink(alarmQuery.getPageLink().nextPageLink());
            alarms = alarmDao.findAlarms(tenantId, alarmQuery);
        } else {
            hasNext = false;
        }
    }
}
boolean convertDeviceProfileAlarmRulesForVersion330(JsonNode spec) {
if (spec != null) {
if (spec.has("type") && spec.get("type").asText().equals("DURATION")) {
@ -716,73 +211,6 @@ public class DefaultDataUpdateService implements DataUpdateService {
return false;
}
// Placeholder: the actual OAuth2 parameter migration (3.2.2 -> 3.3.0) was only shipped
// in ThingsBoard 3.3.0/3.3.1; this method only warns the operator about that limitation.
private void updateOAuth2Params() {
    log.warn("CAUTION: Update of Oauth2 parameters from 3.2.2 to 3.3.0 available only in ThingsBoard versions 3.3.0/3.3.1");
}
// Copies the system "Main" queue configuration into tenant profiles with isolated
// rule engines that have no queue configuration yet (3.3.4 -> 3.4.0 upgrade).
private final PaginatedUpdater<String, TenantProfile> tenantsProfileQueueConfigurationUpdater =
        new PaginatedUpdater<>() {

            @Override
            protected boolean forceReportTotal() {
                return true;
            }

            @Override
            protected String getName() {
                return "Tenant profiles queue configuration updater";
            }

            @Override
            protected PageData<TenantProfile> findEntities(String id, PageLink pageLink) {
                return tenantProfileService.findTenantProfiles(TenantId.SYS_TENANT_ID, pageLink);
            }

            @Override
            protected void updateEntity(TenantProfile tenantProfile) {
                updateTenantProfileQueueConfiguration(tenantProfile);
            }
        };
/**
 * Gives an isolated-rule-engine tenant profile a "Main" queue configuration when it has
 * none, saves the profile, and creates the matching queue for every tenant on the profile.
 * Any failure is logged and swallowed so the overall migration continues.
 */
private void updateTenantProfileQueueConfiguration(TenantProfile profile) {
    try {
        var profileData = profile.getProfileData();
        List<TenantProfileQueueConfiguration> queueConfiguration = profileData.getQueueConfiguration();
        boolean missingQueueConfig = queueConfiguration == null || queueConfiguration.isEmpty();
        if (!profile.isIsolatedTbRuleEngine() || !missingQueueConfig) {
            return; // nothing to do for shared rule engines or already-configured profiles
        }
        TenantProfileQueueConfiguration mainQueueConfig = getMainQueueConfiguration();
        profileData.setQueueConfiguration(Collections.singletonList(mainQueueConfig));
        tenantProfileService.saveTenantProfile(TenantId.SYS_TENANT_ID, profile);
        for (TenantId tenantId : tenantService.findTenantIdsByTenantProfileId(profile.getId())) {
            queueService.saveQueue(new Queue(tenantId, mainQueueConfig));
        }
    } catch (Exception e) {
        log.error("Failed to update tenant profile queue configuration name=[" + profile.getName() + "], id=[" + profile.getId().getId() + "]", e);
    }
}
/**
 * Builds the tenant-profile queue configuration mirroring the system "Main" queue:
 * burst submit strategy and a skip-all-failures processing strategy with retries.
 */
private TenantProfileQueueConfiguration getMainQueueConfiguration() {
    SubmitStrategy submitStrategy = new SubmitStrategy();
    submitStrategy.setType(SubmitStrategyType.BURST);
    submitStrategy.setBatchSize(1000);

    ProcessingStrategy processingStrategy = new ProcessingStrategy();
    processingStrategy.setType(ProcessingStrategyType.SKIP_ALL_FAILURES);
    processingStrategy.setRetries(3);
    processingStrategy.setFailurePercentage(0);
    processingStrategy.setPauseBetweenRetries(3);
    processingStrategy.setMaxPauseBetweenRetries(3);

    TenantProfileQueueConfiguration mainQueue = new TenantProfileQueueConfiguration();
    mainQueue.setName(DataConstants.MAIN_QUEUE_NAME);
    mainQueue.setTopic(DataConstants.MAIN_QUEUE_TOPIC);
    mainQueue.setPollInterval(25);
    mainQueue.setPartitions(10);
    mainQueue.setConsumerPerPartition(true);
    mainQueue.setPackProcessingTimeout(2000);
    mainQueue.setSubmitStrategy(submitStrategy);
    mainQueue.setProcessingStrategy(processingStrategy);
    return mainQueue;
}
public static boolean getEnv(String name, boolean defaultValue) {
String env = System.getenv(name);
if (env == null) {

115
application/src/main/java/org/thingsboard/server/service/install/update/RateLimitsUpdater.java

@ -1,115 +0,0 @@
/**
* Copyright © 2016-2024 The Thingsboard Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.thingsboard.server.service.install.update;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import org.thingsboard.server.common.data.StringUtils;
import org.thingsboard.server.common.data.TenantProfile;
import org.thingsboard.server.common.data.id.TenantId;
import org.thingsboard.server.common.data.page.PageData;
import org.thingsboard.server.common.data.page.PageLink;
import org.thingsboard.server.dao.tenant.TenantProfileService;
@Component
// Copies rate-limit settings that used to live in yml/env configuration into every
// tenant profile (3.3.4 -> 3.4.0 upgrade). Each @Value falls back from the legacy
// environment variable to the yml property to a hard-coded default.
class RateLimitsUpdater extends PaginatedUpdater<String, TenantProfile> {

    // --- REST API rate limits ---
    @Value("#{ environment.getProperty('TB_SERVER_REST_LIMITS_TENANT_ENABLED') ?: environment.getProperty('server.rest.limits.tenant.enabled') ?: 'false' }")
    boolean tenantServerRestLimitsEnabled;
    @Value("#{ environment.getProperty('TB_SERVER_REST_LIMITS_TENANT_CONFIGURATION') ?: environment.getProperty('server.rest.limits.tenant.configuration') ?: '100:1,2000:60' }")
    String tenantServerRestLimitsConfiguration;
    @Value("#{ environment.getProperty('TB_SERVER_REST_LIMITS_CUSTOMER_ENABLED') ?: environment.getProperty('server.rest.limits.customer.enabled') ?: 'false' }")
    boolean customerServerRestLimitsEnabled;
    @Value("#{ environment.getProperty('TB_SERVER_REST_LIMITS_CUSTOMER_CONFIGURATION') ?: environment.getProperty('server.rest.limits.customer.configuration') ?: '50:1,1000:60' }")
    String customerServerRestLimitsConfiguration;

    // --- WebSocket session/subscription limits (0 means unlimited per the defaults) ---
    @Value("#{ environment.getProperty('TB_SERVER_WS_TENANT_RATE_LIMITS_MAX_SESSIONS_PER_TENANT') ?: environment.getProperty('server.ws.limits.max_sessions_per_tenant') ?: '0' }")
    private int maxWsSessionsPerTenant;
    @Value("#{ environment.getProperty('TB_SERVER_WS_TENANT_RATE_LIMITS_MAX_SESSIONS_PER_CUSTOMER') ?: environment.getProperty('server.ws.limits.max_sessions_per_customer') ?: '0' }")
    private int maxWsSessionsPerCustomer;
    @Value("#{ environment.getProperty('TB_SERVER_WS_TENANT_RATE_LIMITS_MAX_SESSIONS_PER_REGULAR_USER') ?: environment.getProperty('server.ws.limits.max_sessions_per_regular_user') ?: '0' }")
    private int maxWsSessionsPerRegularUser;
    @Value("#{ environment.getProperty('TB_SERVER_WS_TENANT_RATE_LIMITS_MAX_SESSIONS_PER_PUBLIC_USER') ?: environment.getProperty('server.ws.limits.max_sessions_per_public_user') ?: '0' }")
    private int maxWsSessionsPerPublicUser;
    @Value("#{ environment.getProperty('TB_SERVER_WS_TENANT_RATE_LIMITS_MAX_QUEUE_PER_WS_SESSION') ?: environment.getProperty('server.ws.limits.max_queue_per_ws_session') ?: '500' }")
    private int wsMsgQueueLimitPerSession;
    @Value("#{ environment.getProperty('TB_SERVER_WS_TENANT_RATE_LIMITS_MAX_SUBSCRIPTIONS_PER_TENANT') ?: environment.getProperty('server.ws.limits.max_subscriptions_per_tenant') ?: '0' }")
    private long maxWsSubscriptionsPerTenant;
    @Value("#{ environment.getProperty('TB_SERVER_WS_TENANT_RATE_LIMITS_MAX_SUBSCRIPTIONS_PER_CUSTOMER') ?: environment.getProperty('server.ws.limits.max_subscriptions_per_customer') ?: '0' }")
    private long maxWsSubscriptionsPerCustomer;
    @Value("#{ environment.getProperty('TB_SERVER_WS_TENANT_RATE_LIMITS_MAX_SUBSCRIPTIONS_PER_REGULAR_USER') ?: environment.getProperty('server.ws.limits.max_subscriptions_per_regular_user') ?: '0' }")
    private long maxWsSubscriptionsPerRegularUser;
    @Value("#{ environment.getProperty('TB_SERVER_WS_TENANT_RATE_LIMITS_MAX_SUBSCRIPTIONS_PER_PUBLIC_USER') ?: environment.getProperty('server.ws.limits.max_subscriptions_per_public_user') ?: '0' }")
    private long maxWsSubscriptionsPerPublicUser;
    @Value("#{ environment.getProperty('TB_SERVER_WS_TENANT_RATE_LIMITS_MAX_UPDATES_PER_SESSION') ?: environment.getProperty('server.ws.limits.max_updates_per_session') ?: '300:1,3000:60' }")
    private String wsUpdatesPerSessionRateLimit;

    // --- Cassandra per-tenant query rate limits ---
    @Value("#{ environment.getProperty('CASSANDRA_QUERY_TENANT_RATE_LIMITS_ENABLED') ?: environment.getProperty('cassandra.query.tenant_rate_limits.enabled') ?: 'false' }")
    private boolean cassandraQueryTenantRateLimitsEnabled;
    @Value("#{ environment.getProperty('CASSANDRA_QUERY_TENANT_RATE_LIMITS_CONFIGURATION') ?: environment.getProperty('cassandra.query.tenant_rate_limits.configuration') ?: '1000:1,30000:60' }")
    private String cassandraQueryTenantRateLimitsConfiguration;

    @Autowired
    private TenantProfileService tenantProfileService;

    @Override
    protected boolean forceReportTotal() {
        return true;
    }

    @Override
    protected String getName() {
        return "Rate limits updater";
    }

    @Override
    protected PageData<TenantProfile> findEntities(String id, PageLink pageLink) {
        return tenantProfileService.findTenantProfiles(TenantId.SYS_TENANT_ID, pageLink);
    }

    // Writes the resolved limits into the profile's default configuration and saves it.
    // String-valued limits are only applied when the corresponding feature flag is on
    // (REST/Cassandra) or the value is non-empty (WS updates); numeric WS limits are
    // copied unconditionally.
    @Override
    protected void updateEntity(TenantProfile tenantProfile) {
        var profileConfiguration = tenantProfile.getDefaultProfileConfiguration();

        if (tenantServerRestLimitsEnabled && StringUtils.isNotEmpty(tenantServerRestLimitsConfiguration)) {
            profileConfiguration.setTenantServerRestLimitsConfiguration(tenantServerRestLimitsConfiguration);
        }
        if (customerServerRestLimitsEnabled && StringUtils.isNotEmpty(customerServerRestLimitsConfiguration)) {
            profileConfiguration.setCustomerServerRestLimitsConfiguration(customerServerRestLimitsConfiguration);
        }

        profileConfiguration.setMaxWsSessionsPerTenant(maxWsSessionsPerTenant);
        profileConfiguration.setMaxWsSessionsPerCustomer(maxWsSessionsPerCustomer);
        profileConfiguration.setMaxWsSessionsPerPublicUser(maxWsSessionsPerPublicUser);
        profileConfiguration.setMaxWsSessionsPerRegularUser(maxWsSessionsPerRegularUser);

        profileConfiguration.setMaxWsSubscriptionsPerTenant(maxWsSubscriptionsPerTenant);
        profileConfiguration.setMaxWsSubscriptionsPerCustomer(maxWsSubscriptionsPerCustomer);
        profileConfiguration.setMaxWsSubscriptionsPerPublicUser(maxWsSubscriptionsPerPublicUser);
        profileConfiguration.setMaxWsSubscriptionsPerRegularUser(maxWsSubscriptionsPerRegularUser);
        profileConfiguration.setWsMsgQueueLimitPerSession(wsMsgQueueLimitPerSession);
        if (StringUtils.isNotEmpty(wsUpdatesPerSessionRateLimit)) {
            profileConfiguration.setWsUpdatesPerSessionRateLimit(wsUpdatesPerSessionRateLimit);
        }

        if (cassandraQueryTenantRateLimitsEnabled && StringUtils.isNotEmpty(cassandraQueryTenantRateLimitsConfiguration)) {
            profileConfiguration.setCassandraQueryTenantRateLimitsConfiguration(cassandraQueryTenantRateLimitsConfiguration);
        }

        tenantProfileService.saveTenantProfile(TenantId.SYS_TENANT_ID, tenantProfile);
    }
}

2
application/src/main/java/org/thingsboard/server/service/notification/DefaultNotificationCenter.java

@ -62,7 +62,7 @@ import org.thingsboard.server.dao.notification.NotificationService;
import org.thingsboard.server.dao.notification.NotificationSettingsService;
import org.thingsboard.server.dao.notification.NotificationTargetService;
import org.thingsboard.server.dao.notification.NotificationTemplateService;
import org.thingsboard.server.dao.util.limits.RateLimitService;
import org.thingsboard.server.cache.limits.RateLimitService;
import org.thingsboard.server.gen.transport.TransportProtos;
import org.thingsboard.server.queue.common.TbProtoQueueMsg;
import org.thingsboard.server.queue.discovery.TopicService;

2
application/src/main/java/org/thingsboard/server/service/notification/rule/DefaultNotificationRuleProcessor.java

@ -41,7 +41,7 @@ import org.thingsboard.server.common.msg.notification.NotificationRuleProcessor;
import org.thingsboard.server.common.msg.plugin.ComponentLifecycleMsg;
import org.thingsboard.server.common.msg.queue.ServiceType;
import org.thingsboard.server.dao.notification.NotificationRequestService;
import org.thingsboard.server.dao.util.limits.RateLimitService;
import org.thingsboard.server.cache.limits.RateLimitService;
import org.thingsboard.server.queue.discovery.PartitionService;
import org.thingsboard.server.queue.notification.NotificationDeduplicationService;
import org.thingsboard.server.service.executors.NotificationExecutorService;

62
application/src/main/java/org/thingsboard/server/service/notification/rule/trigger/EdgeCommunicationFailureTriggerProcessor.java

@ -0,0 +1,62 @@
/**
* Copyright © 2016-2024 The Thingsboard Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.thingsboard.server.service.notification.rule.trigger;
import lombok.RequiredArgsConstructor;
import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Service;
import org.thingsboard.server.common.data.notification.info.EdgeCommunicationFailureNotificationInfo;
import org.thingsboard.server.common.data.notification.info.RuleOriginatedNotificationInfo;
import org.thingsboard.server.common.data.notification.rule.trigger.EdgeCommunicationFailureTrigger;
import org.thingsboard.server.common.data.notification.rule.trigger.config.EdgeCommunicationFailureNotificationRuleTriggerConfig;
import org.thingsboard.server.common.data.notification.rule.trigger.config.NotificationRuleTriggerType;
/**
 * Notification rule trigger processor for edge communication failures.
 * Matches a trigger against the rule's optional edge filter and builds the
 * notification info with the (truncated) failure message.
 */
@Service
@RequiredArgsConstructor
public class EdgeCommunicationFailureTriggerProcessor implements NotificationRuleTriggerProcessor<EdgeCommunicationFailureTrigger, EdgeCommunicationFailureNotificationRuleTriggerConfig> {

    @Override
    public boolean matchesFilter(EdgeCommunicationFailureTrigger trigger, EdgeCommunicationFailureNotificationRuleTriggerConfig triggerConfig) {
        if (CollectionUtils.isNotEmpty(triggerConfig.getEdges())) {
            // Fix: a non-empty edges filter restricts the rule to the listed edges, so the
            // trigger matches when its edge IS contained in the set. The previous negation
            // inverted the filter and was inconsistent with EdgeConnectionTriggerProcessor.
            return triggerConfig.getEdges().contains(trigger.getEdgeId().getId());
        }
        // No edge filter configured: the rule applies to all edges.
        return true;
    }

    @Override
    public RuleOriginatedNotificationInfo constructNotificationInfo(EdgeCommunicationFailureTrigger trigger) {
        return EdgeCommunicationFailureNotificationInfo.builder()
                .tenantId(trigger.getTenantId())
                .edgeId(trigger.getEdgeId())
                .customerId(trigger.getCustomerId())
                .edgeName(trigger.getEdgeName())
                .failureMsg(truncateFailureMsg(trigger.getFailureMsg()))
                .build();
    }

    @Override
    public NotificationRuleTriggerType getTriggerType() {
        return NotificationRuleTriggerType.EDGE_COMMUNICATION_FAILURE;
    }

    // Limits the failure message to 500 characters to keep notifications compact.
    private String truncateFailureMsg(String input) {
        int maxLength = 500;
        if (input != null && input.length() > maxLength) {
            return input.substring(0, maxLength);
        }
        return input;
    }
}

60
application/src/main/java/org/thingsboard/server/service/notification/rule/trigger/EdgeConnectionTriggerProcessor.java

@ -0,0 +1,60 @@
/**
* Copyright © 2016-2024 The Thingsboard Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.thingsboard.server.service.notification.rule.trigger;
import lombok.RequiredArgsConstructor;
import org.apache.commons.collections.CollectionUtils;
import org.springframework.stereotype.Service;
import org.thingsboard.server.common.data.notification.info.EdgeConnectionNotificationInfo;
import org.thingsboard.server.common.data.notification.info.RuleOriginatedNotificationInfo;
import org.thingsboard.server.common.data.notification.rule.trigger.EdgeConnectionTrigger;
import org.thingsboard.server.common.data.notification.rule.trigger.config.EdgeConnectionNotificationRuleTriggerConfig;
import org.thingsboard.server.common.data.notification.rule.trigger.config.EdgeConnectionNotificationRuleTriggerConfig.EdgeConnectivityEvent;
import org.thingsboard.server.common.data.notification.rule.trigger.config.NotificationRuleTriggerType;
/**
 * Notification rule trigger processor for edge connect/disconnect events.
 * A trigger matches when its event type is subscribed to and, if an edge filter
 * is configured, the edge is listed in it.
 */
@Service
@RequiredArgsConstructor
public class EdgeConnectionTriggerProcessor implements NotificationRuleTriggerProcessor<EdgeConnectionTrigger, EdgeConnectionNotificationRuleTriggerConfig> {

    @Override
    public boolean matchesFilter(EdgeConnectionTrigger trigger, EdgeConnectionNotificationRuleTriggerConfig triggerConfig) {
        EdgeConnectivityEvent event;
        if (trigger.isConnected()) {
            event = EdgeConnectivityEvent.CONNECTED;
        } else {
            event = EdgeConnectivityEvent.DISCONNECTED;
        }
        // The event type must be one the rule subscribes to.
        if (CollectionUtils.isEmpty(triggerConfig.getNotifyOn()) || !triggerConfig.getNotifyOn().contains(event)) {
            return false;
        }
        // An empty edges filter means "all edges"; otherwise the edge must be listed.
        if (CollectionUtils.isEmpty(triggerConfig.getEdges())) {
            return true;
        }
        return triggerConfig.getEdges().contains(trigger.getEdgeId().getId());
    }

    @Override
    public RuleOriginatedNotificationInfo constructNotificationInfo(EdgeConnectionTrigger trigger) {
        String eventType = trigger.isConnected() ? "connected" : "disconnected";
        return EdgeConnectionNotificationInfo.builder()
                .eventType(eventType)
                .tenantId(trigger.getTenantId())
                .customerId(trigger.getCustomerId())
                .edgeId(trigger.getEdgeId())
                .edgeName(trigger.getEdgeName())
                .build();
    }

    @Override
    public NotificationRuleTriggerType getTriggerType() {
        return NotificationRuleTriggerType.EDGE_CONNECTION;
    }
}

64
application/src/main/java/org/thingsboard/server/service/queue/DefaultTbClusterService.java

@ -62,6 +62,8 @@ import org.thingsboard.server.common.msg.rule.engine.DeviceNameOrTypeUpdateMsg;
import org.thingsboard.server.dao.edge.EdgeService;
import org.thingsboard.server.gen.transport.TransportProtos;
import org.thingsboard.server.gen.transport.TransportProtos.FromDeviceRPCResponseProto;
import org.thingsboard.server.gen.transport.TransportProtos.QueueDeleteMsg;
import org.thingsboard.server.gen.transport.TransportProtos.QueueUpdateMsg;
import org.thingsboard.server.gen.transport.TransportProtos.ToCoreMsg;
import org.thingsboard.server.gen.transport.TransportProtos.ToCoreNotificationMsg;
import org.thingsboard.server.gen.transport.TransportProtos.ToRuleEngineMsg;
@ -81,9 +83,11 @@ import org.thingsboard.server.service.profile.TbAssetProfileCache;
import org.thingsboard.server.service.profile.TbDeviceProfileCache;
import java.util.Optional;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import static org.thingsboard.server.common.util.ProtoUtils.toProto;
@ -563,40 +567,40 @@ public class DefaultTbClusterService implements TbClusterService {
}
@Override
public void onQueueChange(Queue queue) {
log.trace("[{}][{}] Processing queue change [{}] event", queue.getTenantId(), queue.getId(), queue.getName());
TransportProtos.QueueUpdateMsg queueUpdateMsg = TransportProtos.QueueUpdateMsg.newBuilder()
.setTenantIdMSB(queue.getTenantId().getId().getMostSignificantBits())
.setTenantIdLSB(queue.getTenantId().getId().getLeastSignificantBits())
.setQueueIdMSB(queue.getId().getId().getMostSignificantBits())
.setQueueIdLSB(queue.getId().getId().getLeastSignificantBits())
.setQueueName(queue.getName())
.setQueueTopic(queue.getTopic())
.setPartitions(queue.getPartitions())
.build();
ToRuleEngineNotificationMsg ruleEngineMsg = ToRuleEngineNotificationMsg.newBuilder().setQueueUpdateMsg(queueUpdateMsg).build();
ToCoreNotificationMsg coreMsg = ToCoreNotificationMsg.newBuilder().setQueueUpdateMsg(queueUpdateMsg).build();
ToTransportMsg transportMsg = ToTransportMsg.newBuilder().setQueueUpdateMsg(queueUpdateMsg).build();
public void onQueuesUpdate(List<Queue> queues) {
List<QueueUpdateMsg> queueUpdateMsgs = queues.stream()
.map(queue -> QueueUpdateMsg.newBuilder()
.setTenantIdMSB(queue.getTenantId().getId().getMostSignificantBits())
.setTenantIdLSB(queue.getTenantId().getId().getLeastSignificantBits())
.setQueueIdMSB(queue.getId().getId().getMostSignificantBits())
.setQueueIdLSB(queue.getId().getId().getLeastSignificantBits())
.setQueueName(queue.getName())
.setQueueTopic(queue.getTopic())
.setPartitions(queue.getPartitions())
.build())
.collect(Collectors.toList());
ToRuleEngineNotificationMsg ruleEngineMsg = ToRuleEngineNotificationMsg.newBuilder().addAllQueueUpdateMsgs(queueUpdateMsgs).build();
ToCoreNotificationMsg coreMsg = ToCoreNotificationMsg.newBuilder().addAllQueueUpdateMsgs(queueUpdateMsgs).build();
ToTransportMsg transportMsg = ToTransportMsg.newBuilder().addAllQueueUpdateMsgs(queueUpdateMsgs).build();
doSendQueueNotifications(ruleEngineMsg, coreMsg, transportMsg);
}
@Override
public void onQueueDelete(Queue queue) {
log.trace("[{}][{}] Processing queue delete [{}] event", queue.getTenantId(), queue.getId(), queue.getName());
TransportProtos.QueueDeleteMsg queueDeleteMsg = TransportProtos.QueueDeleteMsg.newBuilder()
.setTenantIdMSB(queue.getTenantId().getId().getMostSignificantBits())
.setTenantIdLSB(queue.getTenantId().getId().getLeastSignificantBits())
.setQueueIdMSB(queue.getId().getId().getMostSignificantBits())
.setQueueIdLSB(queue.getId().getId().getLeastSignificantBits())
.setQueueName(queue.getName())
.build();
ToRuleEngineNotificationMsg ruleEngineMsg = ToRuleEngineNotificationMsg.newBuilder().setQueueDeleteMsg(queueDeleteMsg).build();
ToCoreNotificationMsg coreMsg = ToCoreNotificationMsg.newBuilder().setQueueDeleteMsg(queueDeleteMsg).build();
ToTransportMsg transportMsg = ToTransportMsg.newBuilder().setQueueDeleteMsg(queueDeleteMsg).build();
public void onQueuesDelete(List<Queue> queues) {
List<QueueDeleteMsg> queueDeleteMsgs = queues.stream()
.map(queue -> QueueDeleteMsg.newBuilder()
.setTenantIdMSB(queue.getTenantId().getId().getMostSignificantBits())
.setTenantIdLSB(queue.getTenantId().getId().getLeastSignificantBits())
.setQueueIdMSB(queue.getId().getId().getMostSignificantBits())
.setQueueIdLSB(queue.getId().getId().getLeastSignificantBits())
.setQueueName(queue.getName())
.build())
.collect(Collectors.toList());
ToRuleEngineNotificationMsg ruleEngineMsg = ToRuleEngineNotificationMsg.newBuilder().addAllQueueDeleteMsgs(queueDeleteMsgs).build();
ToCoreNotificationMsg coreMsg = ToCoreNotificationMsg.newBuilder().addAllQueueDeleteMsgs(queueDeleteMsgs).build();
ToTransportMsg transportMsg = ToTransportMsg.newBuilder().addAllQueueDeleteMsgs(queueDeleteMsgs).build();
doSendQueueNotifications(ruleEngineMsg, coreMsg, transportMsg);
}

10
application/src/main/java/org/thingsboard/server/service/queue/DefaultTbCoreConsumerService.java

@ -391,13 +391,11 @@ public class DefaultTbCoreConsumerService extends AbstractConsumerService<ToCore
} else if (!toCoreNotification.getFromEdgeSyncResponseMsg().isEmpty()) {
//will be removed in 3.6.1 in favour of hasFromEdgeSyncResponse()
forwardToAppActor(id, encodingService.decode(toCoreNotification.getFromEdgeSyncResponseMsg().toByteArray()), callback);
} else if (toCoreNotification.hasQueueUpdateMsg()) {
TransportProtos.QueueUpdateMsg queue = toCoreNotification.getQueueUpdateMsg();
partitionService.updateQueue(queue);
} else if (toCoreNotification.getQueueUpdateMsgsCount() > 0) {
partitionService.updateQueues(toCoreNotification.getQueueUpdateMsgsList());
callback.onSuccess();
} else if (toCoreNotification.hasQueueDeleteMsg()) {
TransportProtos.QueueDeleteMsg queue = toCoreNotification.getQueueDeleteMsg();
partitionService.removeQueue(queue);
} else if (toCoreNotification.getQueueDeleteMsgsCount() > 0) {
partitionService.removeQueues(toCoreNotification.getQueueDeleteMsgsList());
callback.onSuccess();
} else if (toCoreNotification.hasVcResponseMsg()) {
vcQueueService.processResponse(toCoreNotification.getVcResponseMsg());

71
application/src/main/java/org/thingsboard/server/service/queue/DefaultTbRuleEngineConsumerService.java

@ -36,6 +36,8 @@ import org.thingsboard.server.common.util.ProtoUtils;
import org.thingsboard.server.dao.queue.QueueService;
import org.thingsboard.server.dao.tenant.TbTenantProfileCache;
import org.thingsboard.server.gen.transport.TransportProtos;
import org.thingsboard.server.gen.transport.TransportProtos.QueueDeleteMsg;
import org.thingsboard.server.gen.transport.TransportProtos.QueueUpdateMsg;
import org.thingsboard.server.gen.transport.TransportProtos.ToRuleEngineNotificationMsg;
import org.thingsboard.server.queue.common.TbProtoQueueMsg;
import org.thingsboard.server.queue.discovery.PartitionService;
@ -164,11 +166,11 @@ public class DefaultTbRuleEngineConsumerService extends AbstractConsumerService<
, proto.getResponse(), error);
tbDeviceRpcService.processRpcResponseFromDevice(response);
callback.onSuccess();
} else if (nfMsg.hasQueueUpdateMsg()) {
updateQueue(nfMsg.getQueueUpdateMsg());
} else if (nfMsg.getQueueUpdateMsgsCount() > 0) {
updateQueues(nfMsg.getQueueUpdateMsgsList());
callback.onSuccess();
} else if (nfMsg.hasQueueDeleteMsg()) {
deleteQueue(nfMsg.getQueueDeleteMsg());
} else if (nfMsg.getQueueDeleteMsgsCount() > 0) {
deleteQueues(nfMsg.getQueueDeleteMsgsList());
callback.onSuccess();
} else {
log.trace("Received notification with missing handler");
@ -176,39 +178,48 @@ public class DefaultTbRuleEngineConsumerService extends AbstractConsumerService<
}
}
private void updateQueue(TransportProtos.QueueUpdateMsg queueUpdateMsg) {
log.info("Received queue update msg: [{}]", queueUpdateMsg);
TenantId tenantId = new TenantId(new UUID(queueUpdateMsg.getTenantIdMSB(), queueUpdateMsg.getTenantIdLSB()));
if (partitionService.isManagedByCurrentService(tenantId)) {
QueueId queueId = new QueueId(new UUID(queueUpdateMsg.getQueueIdMSB(), queueUpdateMsg.getQueueIdLSB()));
String queueName = queueUpdateMsg.getQueueName();
QueueKey queueKey = new QueueKey(ServiceType.TB_RULE_ENGINE, queueName, tenantId);
Queue queue = queueService.findQueueById(tenantId, queueId);
TbRuleEngineQueueConsumerManager consumerManager = getOrCreateConsumer(queueKey);
Queue oldQueue = consumerManager.getQueue();
consumerManager.update(queue);
if (oldQueue != null && queue.getPartitions() == oldQueue.getPartitions()) {
return;
private void updateQueues(List<QueueUpdateMsg> queueUpdateMsgs) {
boolean partitionsChanged = false;
for (QueueUpdateMsg queueUpdateMsg : queueUpdateMsgs) {
log.info("Received queue update msg: [{}]", queueUpdateMsg);
TenantId tenantId = new TenantId(new UUID(queueUpdateMsg.getTenantIdMSB(), queueUpdateMsg.getTenantIdLSB()));
if (partitionService.isManagedByCurrentService(tenantId)) {
QueueId queueId = new QueueId(new UUID(queueUpdateMsg.getQueueIdMSB(), queueUpdateMsg.getQueueIdLSB()));
String queueName = queueUpdateMsg.getQueueName();
QueueKey queueKey = new QueueKey(ServiceType.TB_RULE_ENGINE, queueName, tenantId);
Queue queue = queueService.findQueueById(tenantId, queueId);
TbRuleEngineQueueConsumerManager consumerManager = getOrCreateConsumer(queueKey);
Queue oldQueue = consumerManager.getQueue();
consumerManager.update(queue);
if (oldQueue == null || queue.getPartitions() != oldQueue.getPartitions()) {
partitionsChanged = true;
}
} else {
partitionsChanged = true;
}
}
partitionService.updateQueue(queueUpdateMsg);
partitionService.recalculatePartitions(ctx.getServiceInfoProvider().getServiceInfo(),
new ArrayList<>(partitionService.getOtherServices(ServiceType.TB_RULE_ENGINE)));
if (partitionsChanged) {
partitionService.updateQueues(queueUpdateMsgs);
partitionService.recalculatePartitions(ctx.getServiceInfoProvider().getServiceInfo(),
new ArrayList<>(partitionService.getOtherServices(ServiceType.TB_RULE_ENGINE)));
}
}
private void deleteQueue(TransportProtos.QueueDeleteMsg queueDeleteMsg) {
log.info("Received queue delete msg: [{}]", queueDeleteMsg);
TenantId tenantId = new TenantId(new UUID(queueDeleteMsg.getTenantIdMSB(), queueDeleteMsg.getTenantIdLSB()));
QueueKey queueKey = new QueueKey(ServiceType.TB_RULE_ENGINE, queueDeleteMsg.getQueueName(), tenantId);
var consumerManager = consumers.remove(queueKey);
if (consumerManager != null) {
consumerManager.delete(true);
private void deleteQueues(List<QueueDeleteMsg> queueDeleteMsgs) {
for (QueueDeleteMsg queueDeleteMsg : queueDeleteMsgs) {
log.info("Received queue delete msg: [{}]", queueDeleteMsg);
TenantId tenantId = new TenantId(new UUID(queueDeleteMsg.getTenantIdMSB(), queueDeleteMsg.getTenantIdLSB()));
QueueKey queueKey = new QueueKey(ServiceType.TB_RULE_ENGINE, queueDeleteMsg.getQueueName(), tenantId);
var consumerManager = consumers.remove(queueKey);
if (consumerManager != null) {
consumerManager.delete(true);
}
}
partitionService.removeQueue(queueDeleteMsg);
partitionService.removeQueues(queueDeleteMsgs);
partitionService.recalculatePartitions(ctx.getServiceInfoProvider().getServiceInfo(), new ArrayList<>(partitionService.getOtherServices(ServiceType.TB_RULE_ENGINE)));
}

4
application/src/main/java/org/thingsboard/server/service/queue/TbCoreConsumerStats.java

@ -184,9 +184,9 @@ public class TbCoreConsumerStats {
toCoreNfEdgeSyncResponseCounter.increment();
} else if (!msg.getFromEdgeSyncResponseMsg().isEmpty()) {
toCoreNfEdgeSyncResponseCounter.increment();
} else if (msg.hasQueueUpdateMsg()) {
} else if (msg.getQueueUpdateMsgsCount() > 0) {
toCoreNfQueueUpdateCounter.increment();
} else if (msg.hasQueueDeleteMsg()) {
} else if (msg.getQueueDeleteMsgsCount() > 0) {
toCoreNfQueueDeleteCounter.increment();
} else if (msg.hasVcResponseMsg()) {
toCoreNfVersionControlResponseCounter.increment();

1
application/src/main/java/org/thingsboard/server/service/queue/TbMsgPackCallback.java

@ -24,7 +24,6 @@ import org.thingsboard.server.common.data.id.TenantId;
import org.thingsboard.server.common.msg.queue.RuleEngineException;
import org.thingsboard.server.common.msg.queue.RuleNodeInfo;
import org.thingsboard.server.common.msg.queue.TbMsgCallback;
import org.thingsboard.server.common.msg.tools.TbRateLimitsException;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

12
application/src/main/java/org/thingsboard/server/service/queue/processing/AbstractTbRuleEngineSubmitStrategy.java

@ -15,6 +15,7 @@
*/
package org.thingsboard.server.service.queue.processing;
import org.thingsboard.server.common.data.StringUtils;
import org.thingsboard.server.gen.transport.TransportProtos;
import org.thingsboard.server.queue.common.TbProtoQueueMsg;
@ -51,7 +52,16 @@ public abstract class AbstractTbRuleEngineSubmitStrategy implements TbRuleEngine
List<IdMsgPair<TransportProtos.ToRuleEngineMsg>> newOrderedMsgList = new ArrayList<>(reprocessMap.size());
for (IdMsgPair<TransportProtos.ToRuleEngineMsg> pair : orderedMsgList) {
if (reprocessMap.containsKey(pair.uuid)) {
newOrderedMsgList.add(pair);
if (StringUtils.isNotEmpty(pair.getMsg().getValue().getFailureMessage())) {
var toRuleEngineMsg = TransportProtos.ToRuleEngineMsg.newBuilder(pair.getMsg().getValue())
.clearFailureMessage()
.clearRelationTypes()
.build();
var newMsg = new TbProtoQueueMsg<>(pair.getMsg().getKey(), toRuleEngineMsg, pair.getMsg().getHeaders());
newOrderedMsgList.add(new IdMsgPair<>(pair.getUuid(), newMsg));
} else {
newOrderedMsgList.add(pair);
}
}
}
orderedMsgList = newOrderedMsgList;

2
application/src/main/java/org/thingsboard/server/service/security/auth/mfa/DefaultTwoFactorAuthService.java

@ -32,7 +32,7 @@ import org.thingsboard.server.common.data.security.model.mfa.provider.TwoFaProvi
import org.thingsboard.server.common.data.security.model.mfa.provider.TwoFaProviderType;
import org.thingsboard.server.dao.user.UserService;
import org.thingsboard.server.common.data.limit.LimitedApi;
import org.thingsboard.server.dao.util.limits.RateLimitService;
import org.thingsboard.server.cache.limits.RateLimitService;
import org.thingsboard.server.queue.util.TbCoreComponent;
import org.thingsboard.server.service.security.auth.mfa.config.TwoFaConfigManager;
import org.thingsboard.server.service.security.auth.mfa.provider.TwoFaProvider;

8
application/src/main/java/org/thingsboard/server/service/subscription/DefaultSubscriptionManagerService.java

@ -19,7 +19,6 @@ import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.context.event.EventListener;
import org.springframework.stereotype.Service;
import org.thingsboard.server.common.msg.rule.engine.DeviceAttributesEventNotificationMsg;
import org.thingsboard.server.cluster.TbClusterService;
import org.thingsboard.server.common.data.DataConstants;
import org.thingsboard.server.common.data.EntityType;
@ -37,15 +36,16 @@ import org.thingsboard.server.common.data.kv.TsKvEntry;
import org.thingsboard.server.common.msg.queue.ServiceType;
import org.thingsboard.server.common.msg.queue.TbCallback;
import org.thingsboard.server.common.msg.queue.TopicPartitionInfo;
import org.thingsboard.server.common.msg.rule.engine.DeviceAttributesEventNotificationMsg;
import org.thingsboard.server.gen.transport.TransportProtos.ToCoreNotificationMsg;
import org.thingsboard.server.queue.TbQueueProducer;
import org.thingsboard.server.queue.common.TbProtoQueueMsg;
import org.thingsboard.server.queue.discovery.TopicService;
import org.thingsboard.server.queue.discovery.PartitionService;
import org.thingsboard.server.queue.discovery.TbApplicationEventListener;
import org.thingsboard.server.queue.discovery.TbServiceInfoProvider;
import org.thingsboard.server.queue.discovery.event.PartitionChangeEvent;
import org.thingsboard.server.queue.discovery.TopicService;
import org.thingsboard.server.queue.discovery.event.OtherServiceShutdownEvent;
import org.thingsboard.server.queue.discovery.event.PartitionChangeEvent;
import org.thingsboard.server.queue.provider.TbQueueProducerProvider;
import org.thingsboard.server.queue.util.TbCoreComponent;
import org.thingsboard.server.service.state.DefaultDeviceStateService;
@ -154,7 +154,7 @@ public class DefaultSubscriptionManagerService extends TbApplicationEventListene
protected void onTbApplicationEvent(PartitionChangeEvent partitionChangeEvent) {
if (ServiceType.TB_CORE.equals(partitionChangeEvent.getServiceType())) {
entitySubscriptions.values().removeIf(sub ->
!partitionService.resolve(ServiceType.TB_CORE, sub.getTenantId(), sub.getEntityId()).isMyPartition());
!partitionService.isMyPartition(ServiceType.TB_CORE, sub.getTenantId(), sub.getEntityId()));
}
}

2
application/src/main/java/org/thingsboard/server/service/sync/ie/DefaultEntitiesExportImportService.java

@ -32,7 +32,7 @@ import org.thingsboard.server.common.data.sync.ie.EntityImportResult;
import org.thingsboard.server.common.data.util.ThrowingRunnable;
import org.thingsboard.server.dao.exception.DataValidationException;
import org.thingsboard.server.dao.relation.RelationService;
import org.thingsboard.server.dao.util.limits.RateLimitService;
import org.thingsboard.server.cache.limits.RateLimitService;
import org.thingsboard.server.queue.util.TbCoreComponent;
import org.thingsboard.server.service.entitiy.TbNotificationEntityService;
import org.thingsboard.server.service.sync.ie.exporting.EntityExportService;

15
application/src/main/java/org/thingsboard/server/service/sync/ie/exporting/impl/NotificationRuleExportService.java

@ -29,6 +29,8 @@ import org.thingsboard.server.common.data.notification.rule.EscalatedNotificatio
import org.thingsboard.server.common.data.notification.rule.NotificationRule;
import org.thingsboard.server.common.data.notification.rule.NotificationRuleRecipientsConfig;
import org.thingsboard.server.common.data.notification.rule.trigger.config.DeviceActivityNotificationRuleTriggerConfig;
import org.thingsboard.server.common.data.notification.rule.trigger.config.EdgeCommunicationFailureNotificationRuleTriggerConfig;
import org.thingsboard.server.common.data.notification.rule.trigger.config.EdgeConnectionNotificationRuleTriggerConfig;
import org.thingsboard.server.common.data.notification.rule.trigger.config.NotificationRuleTriggerConfig;
import org.thingsboard.server.common.data.notification.rule.trigger.config.RuleEngineComponentLifecycleEventNotificationRuleTriggerConfig;
import org.thingsboard.server.common.data.sync.ie.EntityExportData;
@ -65,13 +67,24 @@ public class NotificationRuleExportService<I extends EntityId, E extends Exporta
}
break;
}
case RULE_ENGINE_COMPONENT_LIFECYCLE_EVENT:
case RULE_ENGINE_COMPONENT_LIFECYCLE_EVENT: {
RuleEngineComponentLifecycleEventNotificationRuleTriggerConfig triggerConfig = (RuleEngineComponentLifecycleEventNotificationRuleTriggerConfig) ruleTriggerConfig;
Set<UUID> ruleChains = triggerConfig.getRuleChains();
if (ruleChains != null) {
triggerConfig.setRuleChains(toExternalIds(ruleChains, RuleChainId::new, ctx).collect(Collectors.toSet()));
}
break;
}
case EDGE_CONNECTION: {
EdgeConnectionNotificationRuleTriggerConfig triggerConfig = (EdgeConnectionNotificationRuleTriggerConfig) ruleTriggerConfig;
triggerConfig.setEdges(null);
break;
}
case EDGE_COMMUNICATION_FAILURE: {
EdgeCommunicationFailureNotificationRuleTriggerConfig triggerConfig = (EdgeCommunicationFailureNotificationRuleTriggerConfig) ruleTriggerConfig;
triggerConfig.setEdges(null);
break;
}
}
NotificationRuleRecipientsConfig ruleRecipientsConfig = notificationRule.getRecipientsConfig();

15
application/src/main/java/org/thingsboard/server/service/sync/ie/importing/impl/NotificationRuleImportService.java

@ -33,6 +33,8 @@ import org.thingsboard.server.common.data.notification.rule.EscalatedNotificatio
import org.thingsboard.server.common.data.notification.rule.NotificationRule;
import org.thingsboard.server.common.data.notification.rule.NotificationRuleRecipientsConfig;
import org.thingsboard.server.common.data.notification.rule.trigger.config.DeviceActivityNotificationRuleTriggerConfig;
import org.thingsboard.server.common.data.notification.rule.trigger.config.EdgeConnectionNotificationRuleTriggerConfig;
import org.thingsboard.server.common.data.notification.rule.trigger.config.EdgeCommunicationFailureNotificationRuleTriggerConfig;
import org.thingsboard.server.common.data.notification.rule.trigger.config.NotificationRuleTriggerConfig;
import org.thingsboard.server.common.data.notification.rule.trigger.config.NotificationRuleTriggerType;
import org.thingsboard.server.common.data.notification.rule.trigger.config.RuleEngineComponentLifecycleEventNotificationRuleTriggerConfig;
@ -86,7 +88,7 @@ public class NotificationRuleImportService extends BaseEntityImportService<Notif
}
break;
}
case RULE_ENGINE_COMPONENT_LIFECYCLE_EVENT:
case RULE_ENGINE_COMPONENT_LIFECYCLE_EVENT: {
RuleEngineComponentLifecycleEventNotificationRuleTriggerConfig triggerConfig = (RuleEngineComponentLifecycleEventNotificationRuleTriggerConfig) ruleTriggerConfig;
Set<UUID> ruleChains = triggerConfig.getRuleChains();
if (ruleChains != null) {
@ -95,6 +97,17 @@ public class NotificationRuleImportService extends BaseEntityImportService<Notif
.collect(Collectors.toSet()));
}
break;
}
case EDGE_CONNECTION: {
EdgeConnectionNotificationRuleTriggerConfig triggerConfig = (EdgeConnectionNotificationRuleTriggerConfig) ruleTriggerConfig;
triggerConfig.setEdges(null);
break;
}
case EDGE_COMMUNICATION_FAILURE: {
EdgeCommunicationFailureNotificationRuleTriggerConfig triggerConfig = (EdgeCommunicationFailureNotificationRuleTriggerConfig) ruleTriggerConfig;
triggerConfig.setEdges(null);
break;
}
}
if (!triggerType.isTenantLevel()) {
throw new IllegalArgumentException("Trigger type " + triggerType + " is not available for tenants");

60
application/src/main/resources/thingsboard.yml

@ -1173,14 +1173,18 @@ transport:
bind_port: "${SNMP_BIND_PORT:1620}"
response_processing:
# parallelism level for executor (workStealingPool) that is responsible for handling responses from SNMP devices
parallelism_level: "${SNMP_RESPONSE_PROCESSING_PARALLELISM_LEVEL:20}"
parallelism_level: "${SNMP_RESPONSE_PROCESSING_PARALLELISM_LEVEL:4}"
# to configure SNMP to work over UDP or TCP
underlying_protocol: "${SNMP_UNDERLYING_PROTOCOL:udp}"
# Batch size to request OID mappings from the device (useful when the device profile has multiple hundreds of OID mappings)
# Maximum size of a PDU (amount of OID mappings in a single SNMP request). The request will be split into multiple PDUs if mappings amount exceeds this number
max_request_oids: "${SNMP_MAX_REQUEST_OIDS:100}"
# Delay after sending each request chunk (in case the request was split into multiple PDUs due to max_request_oids)
request_chunk_delay_ms: "${SNMP_REQUEST_CHUNK_DELAY_MS:100}"
response:
# To ignore SNMP response values that do not match the data type of the configured OID mapping (by default false - will throw an error if any value of the response not match configured data types)
ignore_type_cast_errors: "${SNMP_RESPONSE_IGNORE_TYPE_CAST_ERRORS:false}"
# Thread pool size for scheduler that executes device querying tasks
scheduler_thread_pool_size: "${SNMP_SCHEDULER_THREAD_POOL_SIZE:4}"
stats:
# Enable/Disable the collection of transport statistics
enabled: "${TB_TRANSPORT_STATS_ENABLED:true}"
@ -1601,58 +1605,6 @@ queue:
print-interval-ms: "${TB_QUEUE_RULE_ENGINE_STATS_PRINT_INTERVAL_MS:60000}"
# Max length of the error message that is printed by statistics
max-error-message-length: "${TB_QUEUE_RULE_ENGINE_MAX_ERROR_MESSAGE_LENGTH:4096}"
queues:
- name: "${TB_QUEUE_RE_MAIN_QUEUE_NAME:Main}" # queue name
topic: "${TB_QUEUE_RE_MAIN_TOPIC:tb_rule_engine.main}" # queue topic
poll-interval: "${TB_QUEUE_RE_MAIN_POLL_INTERVAL_MS:25}" # poll interval
partitions: "${TB_QUEUE_RE_MAIN_PARTITIONS:10}" # number queue partitions
consumer-per-partition: "${TB_QUEUE_RE_MAIN_CONSUMER_PER_PARTITION:true}" # if true - use for each customer different partition
pack-processing-timeout: "${TB_QUEUE_RE_MAIN_PACK_PROCESSING_TIMEOUT_MS:2000}" # Timeout for processing a message pack
submit-strategy:
type: "${TB_QUEUE_RE_MAIN_SUBMIT_STRATEGY_TYPE:BURST}" # BURST, BATCH, SEQUENTIAL_BY_ORIGINATOR, SEQUENTIAL_BY_TENANT, SEQUENTIAL
# For BATCH only
batch-size: "${TB_QUEUE_RE_MAIN_SUBMIT_STRATEGY_BATCH_SIZE:1000}" # Maximum number of messages in batch
processing-strategy:
type: "${TB_QUEUE_RE_MAIN_PROCESSING_STRATEGY_TYPE:SKIP_ALL_FAILURES}" # SKIP_ALL_FAILURES, SKIP_ALL_FAILURES_AND_TIMED_OUT, RETRY_ALL, RETRY_FAILED, RETRY_TIMED_OUT, RETRY_FAILED_AND_TIMED_OUT
# For RETRY_ALL, RETRY_FAILED, RETRY_TIMED_OUT, RETRY_FAILED_AND_TIMED_OUT
retries: "${TB_QUEUE_RE_MAIN_PROCESSING_STRATEGY_RETRIES:3}" # Number of retries, 0 is unlimited
failure-percentage: "${TB_QUEUE_RE_MAIN_PROCESSING_STRATEGY_FAILURE_PERCENTAGE:0}" # Skip retry if failures or timeouts are less than X percentage of messages;
pause-between-retries: "${TB_QUEUE_RE_MAIN_PROCESSING_STRATEGY_RETRY_PAUSE:3}" # Time in seconds to wait in consumer thread before retries;
max-pause-between-retries: "${TB_QUEUE_RE_MAIN_PROCESSING_STRATEGY_MAX_RETRY_PAUSE:3}" # Max allowed time in seconds for pause between retries.
- name: "${TB_QUEUE_RE_HP_QUEUE_NAME:HighPriority}" # queue name
topic: "${TB_QUEUE_RE_HP_TOPIC:tb_rule_engine.hp}" # queue topic
poll-interval: "${TB_QUEUE_RE_HP_POLL_INTERVAL_MS:25}" # poll interval
partitions: "${TB_QUEUE_RE_HP_PARTITIONS:10}" # number queue partitions
consumer-per-partition: "${TB_QUEUE_RE_HP_CONSUMER_PER_PARTITION:true}" # if true - use for each customer different partition
pack-processing-timeout: "${TB_QUEUE_RE_HP_PACK_PROCESSING_TIMEOUT_MS:2000}" # Timeout for processing a message pack
submit-strategy:
type: "${TB_QUEUE_RE_HP_SUBMIT_STRATEGY_TYPE:BURST}" # BURST, BATCH, SEQUENTIAL_BY_ORIGINATOR, SEQUENTIAL_BY_TENANT, SEQUENTIAL
# For BATCH only
batch-size: "${TB_QUEUE_RE_HP_SUBMIT_STRATEGY_BATCH_SIZE:100}" # Maximum number of messages in batch
processing-strategy:
type: "${TB_QUEUE_RE_HP_PROCESSING_STRATEGY_TYPE:RETRY_FAILED_AND_TIMED_OUT}" # SKIP_ALL_FAILURES, SKIP_ALL_FAILURES_AND_TIMED_OUT, RETRY_ALL, RETRY_FAILED, RETRY_TIMED_OUT, RETRY_FAILED_AND_TIMED_OUT
# For RETRY_ALL, RETRY_FAILED, RETRY_TIMED_OUT, RETRY_FAILED_AND_TIMED_OUT
retries: "${TB_QUEUE_RE_HP_PROCESSING_STRATEGY_RETRIES:0}" # Number of retries, 0 is unlimited
failure-percentage: "${TB_QUEUE_RE_HP_PROCESSING_STRATEGY_FAILURE_PERCENTAGE:0}" # Skip retry if failures or timeouts are less than X percentage of messages;
pause-between-retries: "${TB_QUEUE_RE_HP_PROCESSING_STRATEGY_RETRY_PAUSE:5}" # Time in seconds to wait in consumer thread before retries;
max-pause-between-retries: "${TB_QUEUE_RE_HP_PROCESSING_STRATEGY_MAX_RETRY_PAUSE:5}" # Max allowed time in seconds for pause between retries.
- name: "${TB_QUEUE_RE_SQ_QUEUE_NAME:SequentialByOriginator}" # queue name
topic: "${TB_QUEUE_RE_SQ_TOPIC:tb_rule_engine.sq}" # queue topic
poll-interval: "${TB_QUEUE_RE_SQ_POLL_INTERVAL_MS:25}" # poll interval
partitions: "${TB_QUEUE_RE_SQ_PARTITIONS:10}" # number queue partitions
consumer-per-partition: "${TB_QUEUE_RE_SQ_CONSUMER_PER_PARTITION:true}" # if true - use for each customer different partition
pack-processing-timeout: "${TB_QUEUE_RE_SQ_PACK_PROCESSING_TIMEOUT_MS:2000}" # Timeout for processing a message pack
submit-strategy:
type: "${TB_QUEUE_RE_SQ_SUBMIT_STRATEGY_TYPE:SEQUENTIAL_BY_ORIGINATOR}" # BURST, BATCH, SEQUENTIAL_BY_ORIGINATOR, SEQUENTIAL_BY_TENANT, SEQUENTIAL
# For BATCH only
batch-size: "${TB_QUEUE_RE_SQ_SUBMIT_STRATEGY_BATCH_SIZE:100}" # Maximum number of messages in batch
processing-strategy:
type: "${TB_QUEUE_RE_SQ_PROCESSING_STRATEGY_TYPE:RETRY_FAILED_AND_TIMED_OUT}" # SKIP_ALL_FAILURES, SKIP_ALL_FAILURES_AND_TIMED_OUT, RETRY_ALL, RETRY_FAILED, RETRY_TIMED_OUT, RETRY_FAILED_AND_TIMED_OUT
# For RETRY_ALL, RETRY_FAILED, RETRY_TIMED_OUT, RETRY_FAILED_AND_TIMED_OUT
retries: "${TB_QUEUE_RE_SQ_PROCESSING_STRATEGY_RETRIES:3}" # Number of retries, 0 is unlimited
failure-percentage: "${TB_QUEUE_RE_SQ_PROCESSING_STRATEGY_FAILURE_PERCENTAGE:0}" # Skip retry if failures or timeouts are less than X percentage of messages;
pause-between-retries: "${TB_QUEUE_RE_SQ_PROCESSING_STRATEGY_RETRY_PAUSE:5}" # Time in seconds to wait in consumer thread before retries;
max-pause-between-retries: "${TB_QUEUE_RE_SQ_PROCESSING_STRATEGY_MAX_RETRY_PAUSE:5}" # Max allowed time in seconds for pause between retries.
# After a queue is deleted (or the profile's isolation option was disabled), Rule Engine will continue reading related topics during this period before deleting the actual topics
topic-deletion-delay: "${TB_QUEUE_RULE_ENGINE_TOPIC_DELETION_DELAY_SEC:15}"
# Size of the thread pool that handles such operations as partition changes, config updates, queue deletion

20
application/src/test/java/org/thingsboard/server/controller/AbstractNotifyEntityTest.java

@ -154,24 +154,6 @@ public abstract class AbstractNotifyEntityTest extends AbstractWebTest {
Mockito.reset(tbClusterService, auditLogService);
}
protected void testNotifyManyEntityManyTimeMsgToEdgeServiceNever(HasName entity, HasName originator,
TenantId tenantId, CustomerId customerId, UserId userId, String userName,
ActionType actionType, int cntTime, Object... additionalInfo) {
EntityId entityId = createEntityId_NULL_UUID(entity);
EntityId originatorId = createEntityId_NULL_UUID(originator);
testNotificationMsgToEdgeServiceNeverWithActionType(entityId, actionType);
ArgumentMatcher<HasName> matcherEntityClassEquals = argument -> argument.getClass().equals(entity.getClass());
ArgumentMatcher<EntityId> matcherOriginatorId = argument -> argument.getClass().equals(originatorId.getClass());
ArgumentMatcher<CustomerId> matcherCustomerId = customerId == null ?
argument -> argument.getClass().equals(CustomerId.class) : argument -> argument.equals(customerId);
ArgumentMatcher<UserId> matcherUserId = userId == null ?
argument -> argument.getClass().equals(UserId.class) : argument -> argument.equals(userId);
testLogEntityActionAdditionalInfo(matcherEntityClassEquals, matcherOriginatorId, tenantId, matcherCustomerId, matcherUserId, userName, actionType, cntTime,
extractMatcherAdditionalInfo(additionalInfo));
testPushMsgToRuleEngineTime(matcherOriginatorId, tenantId, entity, cntTime);
Mockito.reset(tbClusterService, auditLogService);
}
protected void testNotifyManyEntityManyTimeMsgToEdgeServiceEntityEqAny(HasName entity, HasName originator,
TenantId tenantId, CustomerId customerId, UserId userId, String userName,
ActionType actionType,
@ -624,7 +606,7 @@ public abstract class AbstractNotifyEntityTest extends AbstractWebTest {
private String entityClassToString(HasName entity) {
String className = entity.getClass().toString()
.substring(entity.getClass().toString().lastIndexOf(".") + 1);
List str = className.chars()
List<String> str = className.chars()
.mapToObj(x -> (Character.isUpperCase(x)) ? "_" + Character.toString(x) : Character.toString(x))
.collect(Collectors.toList());
return String.join("", str).toUpperCase(Locale.ENGLISH).substring(1);

37
application/src/test/java/org/thingsboard/server/edge/DeviceEdgeTest.java

@ -711,7 +711,7 @@ public class DeviceEdgeTest extends AbstractEdgeTest {
edgeImitator.sendUplinkMsg(uplinkMsgBuilder.build());
Assert.assertTrue(edgeImitator.waitForResponses());
Assert.assertTrue(onUpdateCallback.getSubscribeLatch().await(30, TimeUnit.SECONDS));
Assert.assertTrue(onUpdateCallback.getSubscribeLatch().await(TIMEOUT, TimeUnit.SECONDS));
Assert.assertEquals(JacksonUtil.newObjectNode().put(attrKey, attrValue),
JacksonUtil.fromBytes(onUpdateCallback.getPayloadBytes()));
@ -798,7 +798,21 @@ public class DeviceEdgeTest extends AbstractEdgeTest {
// clean up stored edge events
edgeEventService.cleanupEvents(1);
// perform rpc call to verify edgeId in DeviceActorMessageProcessor updated properly
// edge is disconnected: perform rpc call - no edge event saved
doPostAsync(
"/api/rpc/oneway/" + device.getId().getId().toString(),
JacksonUtil.toString(createDefaultRpc()),
String.class,
status().isOk());
Awaitility.await()
.atMost(TIMEOUT, TimeUnit.SECONDS)
.until(() -> {
PageData<EdgeEvent> result = edgeEventService.findEdgeEvents(tenantId, tmpEdge.getId(), 0L, null, new TimePageLink(1));
return result.getTotalElements() == 0;
});
// edge is connected: perform rpc call to verify edgeId in DeviceActorMessageProcessor updated properly
simulateEdgeActivation(tmpEdge);
doPostAsync(
"/api/rpc/oneway/" + device.getId().getId().toString(),
JacksonUtil.toString(createDefaultRpc()),
@ -857,4 +871,23 @@ public class DeviceEdgeTest extends AbstractEdgeTest {
return rpc;
}
private void simulateEdgeActivation(Edge edge) throws Exception {
ObjectNode attributes = JacksonUtil.newObjectNode();
attributes.put("active", true);
doPost("/api/plugins/telemetry/EDGE/" + edge.getId() + "/attributes/" + DataConstants.SERVER_SCOPE, attributes);
Awaitility.await()
.atMost(TIMEOUT, TimeUnit.SECONDS)
.until(() -> {
List<Map<String, Object>> values = doGetAsyncTyped("/api/plugins/telemetry/EDGE/" + edge.getId() +
"/values/attributes/SERVER_SCOPE", new TypeReference<>() {});
Optional<Map<String, Object>> activeAttrOpt = values.stream().filter(att -> att.get("key").equals("active")).findFirst();
if (activeAttrOpt.isEmpty()) {
return false;
}
Map<String, Object> activeAttr = activeAttrOpt.get();
return "true".equals(activeAttr.get("value").toString());
});
}
}

23
application/src/test/java/org/thingsboard/server/edge/RuleChainEdgeTest.java

@ -146,7 +146,7 @@ public class RuleChainEdgeTest extends AbstractEdgeTest {
}
}
private void createRuleChainMetadata(RuleChain ruleChain) {
private RuleChainMetaData createRuleChainMetadata(RuleChain ruleChain) {
RuleChainMetaData ruleChainMetaData = new RuleChainMetaData();
ruleChainMetaData.setRuleChainId(ruleChain.getId());
@ -182,7 +182,7 @@ public class RuleChainEdgeTest extends AbstractEdgeTest {
ruleChainMetaData.addConnectionInfo(0, 2, "fail");
ruleChainMetaData.addConnectionInfo(1, 2, "success");
doPost("/api/ruleChain/metadata", ruleChainMetaData, RuleChainMetaData.class);
return doPost("/api/ruleChain/metadata", ruleChainMetaData, RuleChainMetaData.class);
}
@Test
@ -193,9 +193,10 @@ public class RuleChainEdgeTest extends AbstractEdgeTest {
ruleChain.setType(RuleChainType.EDGE);
RuleChain savedRuleChain = doPost("/api/ruleChain", ruleChain, RuleChain.class);
edgeImitator.expectMessageAmount(1);
edgeImitator.expectMessageAmount(2);
doPost("/api/edge/" + edge.getUuidId()
+ "/ruleChain/" + savedRuleChain.getUuidId(), RuleChain.class);
RuleChainMetaData metaData = createRuleChainMetadata(savedRuleChain);
Assert.assertTrue(edgeImitator.waitForMessages());
// set new rule chain as root
@ -213,6 +214,22 @@ public class RuleChainEdgeTest extends AbstractEdgeTest {
Assert.assertTrue(ruleChainMsg.isRoot());
Assert.assertEquals(savedRuleChain.getId(), ruleChainMsg.getId());
// update metadata for root rule chain
edgeImitator.expectMessageAmount(1);
metaData.getNodes().forEach(n -> n.setDebugMode(true));
doPost("/api/ruleChain/metadata", metaData, RuleChainMetaData.class);
Assert.assertTrue(edgeImitator.waitForMessages());
ruleChainUpdateMsgOpt = edgeImitator.findMessageByType(RuleChainUpdateMsg.class);
Assert.assertTrue(ruleChainUpdateMsgOpt.isPresent());
ruleChainUpdateMsg = ruleChainUpdateMsgOpt.get();
ruleChainMsg = JacksonUtil.fromString(ruleChainUpdateMsg.getEntity(), RuleChain.class, true);
Assert.assertNotNull(ruleChainMsg);
Assert.assertTrue(UpdateMsgType.ENTITY_CREATED_RPC_MESSAGE.equals(ruleChainUpdateMsg.getMsgType()) ||
UpdateMsgType.ENTITY_UPDATED_RPC_MESSAGE.equals(ruleChainUpdateMsg.getMsgType()));
Assert.assertEquals(savedRuleChain.getId(), ruleChainMsg.getId());
Assert.assertEquals(savedRuleChain.getName(), ruleChainMsg.getName());
Assert.assertTrue(ruleChainMsg.isRoot());
// revert root rule chain
edgeImitator.expectMessageAmount(1);
doPost("/api/edge/" + edge.getUuidId()

7
application/src/test/java/org/thingsboard/server/edge/imitator/EdgeImitator.java

@ -26,6 +26,7 @@ import lombok.extern.slf4j.Slf4j;
import org.checkerframework.checker.nullness.qual.Nullable;
import org.thingsboard.edge.rpc.EdgeGrpcClient;
import org.thingsboard.edge.rpc.EdgeRpcClient;
import org.thingsboard.server.controller.AbstractWebTest;
import org.thingsboard.server.gen.edge.v1.AdminSettingsUpdateMsg;
import org.thingsboard.server.gen.edge.v1.AlarmCommentUpdateMsg;
import org.thingsboard.server.gen.edge.v1.AlarmUpdateMsg;
@ -72,8 +73,6 @@ import java.util.stream.Collectors;
@Slf4j
public class EdgeImitator {
public static final int TIMEOUT_IN_SECONDS = 30;
private String routingKey;
private String routingSecret;
@ -344,7 +343,7 @@ public class EdgeImitator {
}
public boolean waitForMessages() throws InterruptedException {
return waitForMessages(TIMEOUT_IN_SECONDS);
return waitForMessages(AbstractWebTest.TIMEOUT);
}
public boolean waitForMessages(int timeoutInSeconds) throws InterruptedException {
@ -359,7 +358,7 @@ public class EdgeImitator {
}
public boolean waitForResponses() throws InterruptedException {
return responsesLatch.await(TIMEOUT_IN_SECONDS, TimeUnit.SECONDS);
return responsesLatch.await(AbstractWebTest.TIMEOUT, TimeUnit.SECONDS);
}
public void expectResponsesAmount(int messageAmount) {

6
application/src/test/java/org/thingsboard/server/queue/discovery/HashPartitionServiceTest.java

@ -315,7 +315,7 @@ public class HashPartitionServiceTest {
.setPartitions(isolatedQueue.getPartitions())
.build();
partitionService_common.updateQueue(queueUpdateMsg);
partitionService_common.updateQueues(List.of(queueUpdateMsg));
partitionService_common.recalculatePartitions(commonRuleEngine, List.of(dedicatedRuleEngine));
// expecting event about no partitions for isolated queue key
verifyPartitionChangeEvent(event -> {
@ -323,7 +323,7 @@ public class HashPartitionServiceTest {
return event.getPartitionsMap().get(queueKey).isEmpty();
});
partitionService_dedicated.updateQueue(queueUpdateMsg);
partitionService_dedicated.updateQueues(List.of(queueUpdateMsg));
partitionService_dedicated.recalculatePartitions(dedicatedRuleEngine, List.of(commonRuleEngine));
verifyPartitionChangeEvent(event -> {
QueueKey queueKey = new QueueKey(ServiceType.TB_RULE_ENGINE, DataConstants.MAIN_QUEUE_NAME, tenantId);
@ -342,7 +342,7 @@ public class HashPartitionServiceTest {
.setQueueIdLSB(isolatedQueue.getUuidId().getLeastSignificantBits())
.setQueueName(isolatedQueue.getName())
.build();
partitionService_dedicated.removeQueue(queueDeleteMsg);
partitionService_dedicated.removeQueues(List.of(queueDeleteMsg));
verifyPartitionChangeEvent(event -> {
QueueKey queueKey = new QueueKey(ServiceType.TB_RULE_ENGINE, DataConstants.MAIN_QUEUE_NAME, tenantId);
return event.getPartitionsMap().get(queueKey).isEmpty();

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save